Merge branch 'master' into feature/rank-eval

Christoph Büscher 2016-10-14 17:03:30 +02:00
commit cd9d07b91b
153 changed files with 2794 additions and 1650 deletions

View File

@@ -19,6 +19,7 @@
package org.elasticsearch.plugin.noop.action.bulk;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkShardRequest;
@@ -84,7 +85,7 @@ public class RestNoopBulkAction extends BaseRestHandler {
}
private static class BulkRestBuilderListener extends RestBuilderListener<BulkRequest> {
private final BulkItemResponse ITEM_RESPONSE = new BulkItemResponse(1, "update",
private final BulkItemResponse ITEM_RESPONSE = new BulkItemResponse(1, DocWriteRequest.OpType.UPDATE,
new UpdateResponse(new ShardId("mock", "", 1), "mock_type", "1", 1L, DocWriteResponse.Result.CREATED));
private final RestRequest request;
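
The two noop-plugin files in this commit swap the bare "update" string for the new DocWriteRequest.OpType enum introduced further down in this diff. A minimal sketch of how the enum round-trips the old string spelling (values are illustrative):

import org.elasticsearch.action.DocWriteRequest;

class OpTypeExample {
    public static void main(String[] args) {
        DocWriteRequest.OpType op = DocWriteRequest.OpType.fromString("update"); // parse the legacy string spelling
        assert op == DocWriteRequest.OpType.UPDATE;
        assert "update".equals(op.getLowercase()); // rendered back for REST/XContent output
    }
}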

View File

@@ -20,6 +20,7 @@ package org.elasticsearch.plugin.noop.action.bulk;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
@@ -34,7 +35,7 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
public class TransportNoopBulkAction extends HandledTransportAction<BulkRequest, BulkResponse> {
private static final BulkItemResponse ITEM_RESPONSE = new BulkItemResponse(1, "update",
private static final BulkItemResponse ITEM_RESPONSE = new BulkItemResponse(1, DocWriteRequest.OpType.UPDATE,
new UpdateResponse(new ShardId("mock", "", 1), "mock_type", "1", 1L, DocWriteResponse.Result.CREATED));
@Inject

View File

@@ -89,12 +89,19 @@ public class Version {
public static final Version V_5_0_0_beta1 = new Version(V_5_0_0_beta1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0);
public static final int V_5_0_0_rc1_ID = 5000051;
public static final Version V_5_0_0_rc1 = new Version(V_5_0_0_rc1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0);
public static final int V_5_0_0_rc2_ID = 5000052;
public static final Version V_5_0_0_rc2 = new Version(V_5_0_0_rc2_ID, org.apache.lucene.util.Version.LUCENE_6_2_0);
public static final int V_6_0_0_alpha1_ID = 6000001;
public static final Version V_6_0_0_alpha1 = new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0);
public static final Version CURRENT = V_6_0_0_alpha1;
/* NOTE: don't add unreleased versions to this list except for the version assigned to CURRENT.
* If you need a version that doesn't exist here, for instance V_5_1_0, then create such a version
* as a constant where you need it:
* <pre>
* public static final Version V_5_1_0_UNRELEASED = Version.fromId(5010099);
* </pre>
* Then go to VersionTests.java and add a test for this constant in VersionTests#testUnknownVersions().
* This is particularly useful if you are building a feature that needs a BWC layer for this unreleased version. */
static {
assert CURRENT.luceneVersion.equals(org.apache.lucene.util.Version.LATEST) : "Version must be upgraded to ["
+ org.apache.lucene.util.Version.LATEST + "] but is still set to [" + CURRENT.luceneVersion + "]";
@@ -108,8 +115,6 @@ public class Version {
switch (id) {
case V_6_0_0_alpha1_ID:
return V_6_0_0_alpha1;
case V_5_0_0_rc2_ID:
return V_5_0_0_rc2;
case V_5_0_0_rc1_ID:
return V_5_0_0_rc1;
case V_5_0_0_beta1_ID:
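
The NOTE above prescribes how to reference a version that has not shipped yet. A sketch of that pattern (V_5_1_0_UNRELEASED is the hypothetical constant from the comment; in the id scheme, 5010099 packs 5.1.0 with the 99 build suffix used for GA releases):

import org.elasticsearch.Version;

class VersionBwcSketch {
    // hypothetical constant for a not-yet-released version, per the NOTE above
    static final Version V_5_1_0_UNRELEASED = Version.fromId(5010099); // 5.1.0 with the 99 (GA) build suffix

    public static void main(String[] args) {
        // ids declared in the switch above resolve to their constants
        assert Version.fromId(5000051) == Version.V_5_0_0_rc1;
    }
}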

View File

@@ -0,0 +1,203 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.VersionType;
import java.io.IOException;
import java.util.Locale;
/**
* Generic interface grouping ActionRequests that perform writes to a single document.
* Action requests implementing this interface can be part of a {@link org.elasticsearch.action.bulk.BulkRequest}.
*/
public interface DocWriteRequest<T> extends IndicesRequest {
/**
* Get the index that this request operates on
* @return the index
*/
String index();
/**
* Get the type that this request operates on
* @return the type
*/
String type();
/**
* Get the id of the document for this request
* @return the id
*/
String id();
/**
* Get the options for this request
* @return the indices options
*/
IndicesOptions indicesOptions();
/**
* Set the routing for this request
* @return the Request
*/
T routing(String routing);
/**
* Get the routing for this request
* @return the Routing
*/
String routing();
/**
* Get the parent for this request
* @return the Parent
*/
String parent();
/**
* Get the document version for this request
* @return the document version
*/
long version();
/**
* Sets the version, which will perform the operation only if a matching
* version exists and no changes happened on the doc since then.
*/
T version(long version);
/**
* Get the document version type for this request
* @return the document version type
*/
VersionType versionType();
/**
* Sets the versioning type. Defaults to {@link VersionType#INTERNAL}.
*/
T versionType(VersionType versionType);
/**
* Get the requested document operation type of the request
* @return the operation type {@link OpType}
*/
OpType opType();
/**
* Requested operation type to perform on the document
*/
enum OpType {
/**
* Index the source. If there is an existing document with the id, it will
* be replaced.
*/
INDEX(0),
/**
* Creates the resource. Simply adds it to the index; if a document with the same
* id already exists, the operation fails instead of replacing it.
*/
CREATE(1),
/** Updates a document */
UPDATE(2),
/** Deletes a document */
DELETE(3);
private final byte op;
private final String lowercase;
OpType(int op) {
this.op = (byte) op;
this.lowercase = this.toString().toLowerCase(Locale.ROOT);
}
public byte getId() {
return op;
}
public String getLowercase() {
return lowercase;
}
public static OpType fromId(byte id) {
switch (id) {
case 0: return INDEX;
case 1: return CREATE;
case 2: return UPDATE;
case 3: return DELETE;
default: throw new IllegalArgumentException("Unknown opType: [" + id + "]");
}
}
public static OpType fromString(String sOpType) {
String lowerCase = sOpType.toLowerCase(Locale.ROOT);
for (OpType opType : OpType.values()) {
if (opType.getLowercase().equals(lowerCase)) {
return opType;
}
}
throw new IllegalArgumentException("Unknown opType: [" + sOpType + "]");
}
}
/** read a document write (index/delete/update) request */
static DocWriteRequest readDocumentRequest(StreamInput in) throws IOException {
byte type = in.readByte();
DocWriteRequest docWriteRequest;
if (type == 0) {
IndexRequest indexRequest = new IndexRequest();
indexRequest.readFrom(in);
docWriteRequest = indexRequest;
} else if (type == 1) {
DeleteRequest deleteRequest = new DeleteRequest();
deleteRequest.readFrom(in);
docWriteRequest = deleteRequest;
} else if (type == 2) {
UpdateRequest updateRequest = new UpdateRequest();
updateRequest.readFrom(in);
docWriteRequest = updateRequest;
} else {
throw new IllegalStateException("invalid request type [" + type+ " ]");
}
return docWriteRequest;
}
/** write a document write (index/delete/update) request*/
static void writeDocumentRequest(StreamOutput out, DocWriteRequest request) throws IOException {
if (request instanceof IndexRequest) {
out.writeByte((byte) 0);
((IndexRequest) request).writeTo(out);
} else if (request instanceof DeleteRequest) {
out.writeByte((byte) 1);
((DeleteRequest) request).writeTo(out);
} else if (request instanceof UpdateRequest) {
out.writeByte((byte) 2);
((UpdateRequest) request).writeTo(out);
} else {
throw new IllegalStateException("invalid request [" + request.getClass().getSimpleName() + " ]");
}
}
}
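
A short round-trip sketch of the two static helpers above, using the in-memory stream classes from org.elasticsearch.common.io.stream (index/type/id values are illustrative):

import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;

class DocWriteRequestRoundTrip {
    public static void main(String[] args) throws Exception {
        IndexRequest original = new IndexRequest("idx", "doc", "1").source("field", "value");

        BytesStreamOutput out = new BytesStreamOutput();
        DocWriteRequest.writeDocumentRequest(out, original);   // leading discriminator byte 0 for index

        StreamInput in = out.bytes().streamInput();
        DocWriteRequest read = DocWriteRequest.readDocumentRequest(in); // reads the byte back and dispatches
        assert read instanceof IndexRequest;
        assert "1".equals(read.id());
    }
}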

View File

@@ -1,73 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action;
import org.elasticsearch.action.support.IndicesOptions;
/**
* Generic interface to group ActionRequests that work at the single-document level
*
* Forces implementing classes to expose index/type/id getters
*/
public interface DocumentRequest<T> extends IndicesRequest {
/**
* Get the index that this request operates on
* @return the index
*/
String index();
/**
* Get the type that this request operates on
* @return the type
*/
String type();
/**
* Get the id of the document for this request
* @return the id
*/
String id();
/**
* Get the options for this request
* @return the indices options
*/
IndicesOptions indicesOptions();
/**
* Set the routing for this request
* @return the Request
*/
T routing(String routing);
/**
* Get the routing for this request
* @return the Routing
*/
String routing();
/**
* Get the parent for this request
* @return the Parent
*/
String parent();
}

View File

@@ -20,14 +20,15 @@
package org.elasticsearch.action.admin.indices.validate.query;
import org.elasticsearch.action.support.broadcast.BroadcastShardRequest;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.search.internal.AliasFilter;
import java.io.IOException;
import java.util.Objects;
/**
* Internal validate request executed directly against a specific index shard.
@@ -39,21 +40,18 @@ public class ShardValidateQueryRequest extends BroadcastShardRequest {
private boolean explain;
private boolean rewrite;
private long nowInMillis;
@Nullable
private String[] filteringAliases;
private AliasFilter filteringAliases;
public ShardValidateQueryRequest() {
}
ShardValidateQueryRequest(ShardId shardId, @Nullable String[] filteringAliases, ValidateQueryRequest request) {
public ShardValidateQueryRequest(ShardId shardId, AliasFilter filteringAliases, ValidateQueryRequest request) {
super(shardId, request);
this.query = request.query();
this.types = request.types();
this.explain = request.explain();
this.rewrite = request.rewrite();
this.filteringAliases = filteringAliases;
this.filteringAliases = Objects.requireNonNull(filteringAliases, "filteringAliases must not be null");
this.nowInMillis = request.nowInMillis;
}
@@ -73,7 +71,7 @@ public class ShardValidateQueryRequest extends BroadcastShardRequest {
return this.rewrite;
}
public String[] filteringAliases() {
public AliasFilter filteringAliases() {
return filteringAliases;
}
@@ -93,14 +91,7 @@ public class ShardValidateQueryRequest extends BroadcastShardRequest {
types[i] = in.readString();
}
}
int aliasesSize = in.readVInt();
if (aliasesSize > 0) {
filteringAliases = new String[aliasesSize];
for (int i = 0; i < aliasesSize; i++) {
filteringAliases[i] = in.readString();
}
}
filteringAliases = new AliasFilter(in);
explain = in.readBoolean();
rewrite = in.readBoolean();
nowInMillis = in.readVLong();
@@ -110,20 +101,11 @@ public class ShardValidateQueryRequest extends BroadcastShardRequest {
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeNamedWriteable(query);
out.writeVInt(types.length);
for (String type : types) {
out.writeString(type);
}
if (filteringAliases != null) {
out.writeVInt(filteringAliases.length);
for (String alias : filteringAliases) {
out.writeString(alias);
}
} else {
out.writeVInt(0);
}
filteringAliases.writeTo(out);
out.writeBoolean(explain);
out.writeBoolean(rewrite);
out.writeVLong(nowInMillis);
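
The hand-rolled null-or-array encoding above gives way to AliasFilter owning its own wire format. A minimal sketch of the pattern this change adopts, with a hypothetical FilterHolder standing in for AliasFilter:

import java.io.IOException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

// Hypothetical value class: one symmetric constructor/writeTo pair replaces the
// manual vInt-plus-loop (and the null sentinel) at every call site.
final class FilterHolder implements Writeable {
    private final String[] aliases;

    FilterHolder(String... aliases) {
        this.aliases = aliases;
    }

    FilterHolder(StreamInput in) throws IOException {
        this.aliases = in.readStringArray();     // mirrors writeTo below
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeStringArray(aliases);
    }
}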

View File

@@ -19,7 +19,6 @@
package org.elasticsearch.action.admin.indices.validate.query;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
import org.elasticsearch.action.ActionListener;
@@ -43,6 +42,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.index.query.QueryShardException;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.ShardSearchLocalRequest;
import org.elasticsearch.tasks.Task;
@@ -77,8 +77,9 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
@Override
protected ShardValidateQueryRequest newShardRequest(int numShards, ShardRouting shard, ValidateQueryRequest request) {
String[] filteringAliases = indexNameExpressionResolver.filteringAliases(clusterService.state(), shard.getIndexName(), request.indices());
return new ShardValidateQueryRequest(shard.shardId(), filteringAliases, request);
final AliasFilter aliasFilter = searchService.buildAliasFilter(clusterService.state(), shard.getIndexName(),
request.indices());
return new ShardValidateQueryRequest(shard.shardId(), aliasFilter, request);
}
@Override
@@ -141,8 +142,7 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
}
@Override
protected ShardValidateQueryResponse shardOperation(ShardValidateQueryRequest request) {
protected ShardValidateQueryResponse shardOperation(ShardValidateQueryRequest request) throws IOException {
boolean valid;
String explanation = null;
String error = null;

View File

@@ -19,11 +19,7 @@
package org.elasticsearch.action.bulk;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
@@ -33,7 +29,7 @@ import java.io.IOException;
public class BulkItemRequest implements Streamable {
private int id;
private ActionRequest request;
private DocWriteRequest request;
private volatile BulkItemResponse primaryResponse;
private volatile boolean ignoreOnReplica;
@@ -41,8 +37,7 @@ public class BulkItemRequest implements Streamable {
}
public BulkItemRequest(int id, ActionRequest request) {
assert request instanceof IndicesRequest;
public BulkItemRequest(int id, DocWriteRequest request) {
this.id = id;
this.request = request;
}
@@ -51,14 +46,13 @@
return id;
}
public ActionRequest request() {
public DocWriteRequest request() {
return request;
}
public String index() {
IndicesRequest indicesRequest = (IndicesRequest) request;
assert indicesRequest.indices().length == 1;
return indicesRequest.indices()[0];
assert request.indices().length == 1;
return request.indices()[0];
}
BulkItemResponse getPrimaryResponse() {
@@ -89,15 +83,7 @@
@Override
public void readFrom(StreamInput in) throws IOException {
id = in.readVInt();
byte type = in.readByte();
if (type == 0) {
request = new IndexRequest();
} else if (type == 1) {
request = new DeleteRequest();
} else if (type == 2) {
request = new UpdateRequest();
}
request.readFrom(in);
request = DocWriteRequest.readDocumentRequest(in);
if (in.readBoolean()) {
primaryResponse = BulkItemResponse.readBulkItem(in);
}
@@ -107,14 +93,7 @@
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(id);
if (request instanceof IndexRequest) {
out.writeByte((byte) 0);
} else if (request instanceof DeleteRequest) {
out.writeByte((byte) 1);
} else if (request instanceof UpdateRequest) {
out.writeByte((byte) 2);
}
request.writeTo(out);
DocWriteRequest.writeDocumentRequest(out, request);
out.writeOptionalStreamable(primaryResponse);
out.writeBoolean(ignoreOnReplica);
}
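
The writeTo above pairs the centralized DocWriteRequest.writeDocumentRequest with writeOptionalStreamable, which prefixes the value with a presence flag. A sketch of roughly what that optional write expands to (writeOptional is a hypothetical stand-in for the built-in helper):

import java.io.IOException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;

class OptionalWriteSketch {
    // roughly what out.writeOptionalStreamable(primaryResponse) does internally
    static void writeOptional(StreamOutput out, @Nullable Streamable value) throws IOException {
        if (value == null) {
            out.writeBoolean(false);   // absent marker
        } else {
            out.writeBoolean(true);    // present marker, then the payload
            value.writeTo(out);
        }
    }
}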

View File

@@ -21,7 +21,9 @@ package org.elasticsearch.action.bulk;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.DocWriteRequest.OpType;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.update.UpdateResponse;
@@ -50,7 +52,7 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(opType);
builder.startObject(opType.getLowercase());
if (failure == null) {
response.toXContent(builder, params);
builder.field(Fields.STATUS, response.status().getStatus());
@@ -183,7 +185,7 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
private int id;
private String opType;
private OpType opType;
private DocWriteResponse response;
@@ -193,13 +195,13 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
}
public BulkItemResponse(int id, String opType, DocWriteResponse response) {
public BulkItemResponse(int id, OpType opType, DocWriteResponse response) {
this.id = id;
this.opType = opType;
this.response = response;
this.opType = opType;
}
public BulkItemResponse(int id, String opType, Failure failure) {
public BulkItemResponse(int id, OpType opType, Failure failure) {
this.id = id;
this.opType = opType;
this.failure = failure;
@@ -215,7 +217,7 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
/**
* The operation type ("index", "create" or "delete").
*/
public String getOpType() {
public OpType getOpType() {
return this.opType;
}
@@ -300,7 +302,11 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
@Override
public void readFrom(StreamInput in) throws IOException {
id = in.readVInt();
opType = in.readString();
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
opType = OpType.fromId(in.readByte());
} else {
opType = OpType.fromString(in.readString());
}
byte type = in.readByte();
if (type == 0) {
@@ -322,7 +328,11 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(id);
out.writeString(opType);
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
out.writeByte(opType.getId());
} else {
out.writeString(opType.getLowercase());
}
if (response == null) {
out.writeByte((byte) 2);
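
The gating above is the standard rolling-upgrade pattern: the stream's version is the remote node's version, so the compact byte encoding only flows to nodes that can read it, while older peers still get the legacy string. A test-style sketch of steering the two branches (assuming the usual in-memory stream classes):

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.BytesStreamOutput;

class BwcEncodingSketch {
    public static void main(String[] args) {
        BytesStreamOutput toOldNode = new BytesStreamOutput();
        toOldNode.setVersion(Version.V_5_0_0_rc1);      // pre-6.0 peer: writeTo picks writeString("update")

        BytesStreamOutput toNewNode = new BytesStreamOutput();
        toNewNode.setVersion(Version.V_6_0_0_alpha1);   // 6.0+ peer: writeTo picks the single-byte form
    }
}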

View File

@@ -19,7 +19,7 @@
package org.elasticsearch.action.bulk;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.Client;
@@ -250,24 +250,24 @@ public class BulkProcessor implements Closeable {
* (for example, if no id is provided one will be generated, and the create flag is honored).
*/
public BulkProcessor add(IndexRequest request) {
return add((ActionRequest<?>) request);
return add((DocWriteRequest) request);
}
/**
* Adds an {@link DeleteRequest} to the list of actions to execute.
*/
public BulkProcessor add(DeleteRequest request) {
return add((ActionRequest<?>) request);
return add((DocWriteRequest) request);
}
/**
* Adds either a delete or an index request.
*/
public BulkProcessor add(ActionRequest<?> request) {
public BulkProcessor add(DocWriteRequest request) {
return add(request, null);
}
public BulkProcessor add(ActionRequest<?> request, @Nullable Object payload) {
public BulkProcessor add(DocWriteRequest request, @Nullable Object payload) {
internalAdd(request, payload);
return this;
}
@@ -282,7 +282,7 @@
}
}
private synchronized void internalAdd(ActionRequest<?> request, @Nullable Object payload) {
private synchronized void internalAdd(DocWriteRequest request, @Nullable Object payload) {
ensureOpen();
bulkRequest.add(request, payload);
executeIfNeeded();

View File

@@ -22,6 +22,7 @@ package org.elasticsearch.action.bulk;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.CompositeIndicesRequest;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
@@ -49,6 +50,7 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;
import static org.elasticsearch.action.ValidateActions.addValidationError;
@@ -70,7 +72,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
* {@link WriteRequest}s to this but java doesn't support syntax to declare that everything in the array has both types so we declare
* the one with the least casts.
*/
final List<ActionRequest<?>> requests = new ArrayList<>();
final List<DocWriteRequest> requests = new ArrayList<>();
List<Object> payloads = null;
protected TimeValue timeout = BulkShardRequest.DEFAULT_TIMEOUT;
@@ -85,14 +87,14 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
/**
* Adds a list of requests to be executed. Either index or delete requests.
*/
public BulkRequest add(ActionRequest<?>... requests) {
for (ActionRequest<?> request : requests) {
public BulkRequest add(DocWriteRequest... requests) {
for (DocWriteRequest request : requests) {
add(request, null);
}
return this;
}
public BulkRequest add(ActionRequest<?> request) {
public BulkRequest add(DocWriteRequest request) {
return add(request, null);
}
@@ -102,7 +104,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
* @param payload Optional payload
* @return the current bulk request
*/
public BulkRequest add(ActionRequest<?> request, @Nullable Object payload) {
public BulkRequest add(DocWriteRequest request, @Nullable Object payload) {
if (request instanceof IndexRequest) {
add((IndexRequest) request, payload);
} else if (request instanceof DeleteRequest) {
@@ -118,8 +120,8 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
/**
* Adds a list of requests to be executed. Either index or delete requests.
*/
public BulkRequest add(Iterable<ActionRequest<?>> requests) {
for (ActionRequest<?> request : requests) {
public BulkRequest add(Iterable<DocWriteRequest> requests) {
for (DocWriteRequest request : requests) {
add(request);
}
return this;
@@ -205,18 +207,13 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
/**
* The list of requests in this bulk request.
*/
public List<ActionRequest<?>> requests() {
public List<DocWriteRequest> requests() {
return this.requests;
}
@Override
public List<? extends IndicesRequest> subRequests() {
List<IndicesRequest> indicesRequests = new ArrayList<>();
for (ActionRequest<?> request : requests) {
assert request instanceof IndicesRequest;
indicesRequests.add((IndicesRequest) request);
}
return indicesRequests;
return requests.stream().collect(Collectors.toList());
}
/**
@@ -511,7 +508,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
* @return Whether this bulk request contains index request with an ingest pipeline enabled.
*/
public boolean hasIndexRequestsWithPipelines() {
for (ActionRequest<?> actionRequest : requests) {
for (DocWriteRequest actionRequest : requests) {
if (actionRequest instanceof IndexRequest) {
IndexRequest indexRequest = (IndexRequest) actionRequest;
if (Strings.hasText(indexRequest.getPipeline())) {
@@ -529,13 +526,13 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
if (requests.isEmpty()) {
validationException = addValidationError("no requests added", validationException);
}
for (ActionRequest<?> request : requests) {
for (DocWriteRequest request : requests) {
// We first check if refresh has been set
if (((WriteRequest<?>) request).getRefreshPolicy() != RefreshPolicy.NONE) {
validationException = addValidationError(
"RefreshPolicy is not supported on an item request. Set it on the BulkRequest instead.", validationException);
}
ActionRequestValidationException ex = request.validate();
ActionRequestValidationException ex = ((WriteRequest<?>) request).validate();
if (ex != null) {
if (validationException == null) {
validationException = new ActionRequestValidationException();
@@ -553,20 +550,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
waitForActiveShards = ActiveShardCount.readFrom(in);
int size = in.readVInt();
for (int i = 0; i < size; i++) {
byte type = in.readByte();
if (type == 0) {
IndexRequest request = new IndexRequest();
request.readFrom(in);
requests.add(request);
} else if (type == 1) {
DeleteRequest request = new DeleteRequest();
request.readFrom(in);
requests.add(request);
} else if (type == 2) {
UpdateRequest request = new UpdateRequest();
request.readFrom(in);
requests.add(request);
}
requests.add(DocWriteRequest.readDocumentRequest(in));
}
refreshPolicy = RefreshPolicy.readFrom(in);
timeout = new TimeValue(in);
@@ -577,15 +561,8 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
super.writeTo(out);
waitForActiveShards.writeTo(out);
out.writeVInt(requests.size());
for (ActionRequest<?> request : requests) {
if (request instanceof IndexRequest) {
out.writeByte((byte) 0);
} else if (request instanceof DeleteRequest) {
out.writeByte((byte) 1);
} else if (request instanceof UpdateRequest) {
out.writeByte((byte) 2);
}
request.writeTo(out);
for (DocWriteRequest request : requests) {
DocWriteRequest.writeDocumentRequest(out, request);
}
refreshPolicy.writeTo(out);
timeout.writeTo(out);
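
After this change the request list is one homogeneous List<DocWriteRequest>, and serialization funnels through the DocWriteRequest helpers shown earlier instead of per-type discriminator branches. A short sketch of mixing item types (values are illustrative):

import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;

class BulkRequestSketch {
    public static void main(String[] args) {
        BulkRequest bulk = new BulkRequest();
        bulk.add(new IndexRequest("idx", "doc", "1").source("field", "value"));
        bulk.add(new UpdateRequest("idx", "doc", "1").doc("field", "updated"));
        bulk.add(new DeleteRequest("idx", "doc", "2"));
        assert bulk.requests().size() == 3;   // all three shapes in one typed list
    }
}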

View File

@@ -19,12 +19,10 @@
package org.elasticsearch.action.bulk;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.DocumentRequest;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.RoutingMissingException;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
@@ -58,16 +56,19 @@ import org.elasticsearch.transport.TransportService;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.LongSupplier;
import java.util.stream.Collectors;
/**
* Groups bulk request items by shard, optionally creates non-existent indices, and
* delegates to {@link TransportShardBulkAction} for shard-level bulk execution
*/
public class TransportBulkAction extends HandledTransportAction<BulkRequest, BulkResponse> {
private final AutoCreateIndex autoCreateIndex;
@@ -116,15 +117,9 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
if (needToCheck()) {
// Keep track of all unique indices and all unique types per index for the create index requests:
final Set<String> autoCreateIndices = new HashSet<>();
for (ActionRequest request : bulkRequest.requests) {
if (request instanceof DocumentRequest) {
DocumentRequest req = (DocumentRequest) request;
autoCreateIndices.add(req.index());
} else {
throw new ElasticsearchException("Parsed unknown request in bulk actions: " + request.getClass().getSimpleName());
}
}
final Set<String> autoCreateIndices = bulkRequest.requests.stream()
.map(DocWriteRequest::index)
.collect(Collectors.toSet());
final AtomicInteger counter = new AtomicInteger(autoCreateIndices.size());
ClusterState state = clusterService.state();
for (String index : autoCreateIndices) {
@@ -150,7 +145,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
if (!(ExceptionsHelper.unwrapCause(e) instanceof IndexAlreadyExistsException)) {
// fail all requests involving this index, if create didn't work
for (int i = 0; i < bulkRequest.requests.size(); i++) {
ActionRequest request = bulkRequest.requests.get(i);
DocWriteRequest request = bulkRequest.requests.get(i);
if (request != null && setResponseFailureIfIndexMatches(responses, i, request, index, e)) {
bulkRequest.requests.set(i, null);
}
@@ -185,28 +180,11 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
return autoCreateIndex.shouldAutoCreate(index, state);
}
private boolean setResponseFailureIfIndexMatches(AtomicArray<BulkItemResponse> responses, int idx, ActionRequest request, String index, Exception e) {
if (request instanceof IndexRequest) {
IndexRequest indexRequest = (IndexRequest) request;
if (index.equals(indexRequest.index())) {
responses.set(idx, new BulkItemResponse(idx, "index", new BulkItemResponse.Failure(indexRequest.index(), indexRequest.type(), indexRequest.id(), e)));
private boolean setResponseFailureIfIndexMatches(AtomicArray<BulkItemResponse> responses, int idx, DocWriteRequest request, String index, Exception e) {
if (index.equals(request.index())) {
responses.set(idx, new BulkItemResponse(idx, request.opType(), new BulkItemResponse.Failure(request.index(), request.type(), request.id(), e)));
return true;
}
} else if (request instanceof DeleteRequest) {
DeleteRequest deleteRequest = (DeleteRequest) request;
if (index.equals(deleteRequest.index())) {
responses.set(idx, new BulkItemResponse(idx, "delete", new BulkItemResponse.Failure(deleteRequest.index(), deleteRequest.type(), deleteRequest.id(), e)));
return true;
}
} else if (request instanceof UpdateRequest) {
UpdateRequest updateRequest = (UpdateRequest) request;
if (index.equals(updateRequest.index())) {
responses.set(idx, new BulkItemResponse(idx, "update", new BulkItemResponse.Failure(updateRequest.index(), updateRequest.type(), updateRequest.id(), e)));
return true;
}
} else {
throw new ElasticsearchException("Parsed unknown request in bulk actions: " + request.getClass().getSimpleName());
}
return false;
}
@@ -233,95 +211,56 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
final ConcreteIndices concreteIndices = new ConcreteIndices(clusterState, indexNameExpressionResolver);
MetaData metaData = clusterState.metaData();
for (int i = 0; i < bulkRequest.requests.size(); i++) {
ActionRequest request = bulkRequest.requests.get(i);
DocWriteRequest docWriteRequest = bulkRequest.requests.get(i);
// the request can only be null because we set it to null in the previous step, so it gets ignored
if (request == null) {
if (docWriteRequest == null) {
continue;
}
DocumentRequest documentRequest = (DocumentRequest) request;
if (addFailureIfIndexIsUnavailable(documentRequest, bulkRequest, responses, i, concreteIndices, metaData)) {
if (addFailureIfIndexIsUnavailable(docWriteRequest, bulkRequest, responses, i, concreteIndices, metaData)) {
continue;
}
Index concreteIndex = concreteIndices.resolveIfAbsent(documentRequest);
if (request instanceof IndexRequest) {
IndexRequest indexRequest = (IndexRequest) request;
Index concreteIndex = concreteIndices.resolveIfAbsent(docWriteRequest);
try {
switch (docWriteRequest.opType()) {
case CREATE:
case INDEX:
IndexRequest indexRequest = (IndexRequest) docWriteRequest;
MappingMetaData mappingMd = null;
final IndexMetaData indexMetaData = metaData.index(concreteIndex);
if (indexMetaData != null) {
mappingMd = indexMetaData.mappingOrDefault(indexRequest.type());
}
try {
indexRequest.resolveRouting(metaData);
indexRequest.process(mappingMd, allowIdGeneration, concreteIndex.getName());
break;
case UPDATE:
TransportUpdateAction.resolveAndValidateRouting(metaData, concreteIndex.getName(), (UpdateRequest) docWriteRequest);
break;
case DELETE:
TransportDeleteAction.resolveAndValidateRouting(metaData, concreteIndex.getName(), (DeleteRequest) docWriteRequest);
break;
default: throw new AssertionError("request type not supported: [" + docWriteRequest.opType() + "]");
}
} catch (ElasticsearchParseException | RoutingMissingException e) {
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex.getName(), indexRequest.type(), indexRequest.id(), e);
BulkItemResponse bulkItemResponse = new BulkItemResponse(i, "index", failure);
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex.getName(), docWriteRequest.type(), docWriteRequest.id(), e);
BulkItemResponse bulkItemResponse = new BulkItemResponse(i, docWriteRequest.opType(), failure);
responses.set(i, bulkItemResponse);
// make sure the request never gets processed again
bulkRequest.requests.set(i, null);
}
} else if (request instanceof DeleteRequest) {
try {
TransportDeleteAction.resolveAndValidateRouting(metaData, concreteIndex.getName(), (DeleteRequest)request);
} catch(RoutingMissingException e) {
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex.getName(), documentRequest.type(), documentRequest.id(), e);
BulkItemResponse bulkItemResponse = new BulkItemResponse(i, "delete", failure);
responses.set(i, bulkItemResponse);
// make sure the request never gets processed again
bulkRequest.requests.set(i, null);
}
} else if (request instanceof UpdateRequest) {
try {
TransportUpdateAction.resolveAndValidateRouting(metaData, concreteIndex.getName(), (UpdateRequest)request);
} catch(RoutingMissingException e) {
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex.getName(), documentRequest.type(), documentRequest.id(), e);
BulkItemResponse bulkItemResponse = new BulkItemResponse(i, "update", failure);
responses.set(i, bulkItemResponse);
// make sure the request never gets processed again
bulkRequest.requests.set(i, null);
}
} else {
throw new AssertionError("request type not supported: [" + request.getClass().getName() + "]");
}
}
// first, go over all the requests and create a ShardId -> Operations mapping
Map<ShardId, List<BulkItemRequest>> requestsByShard = new HashMap<>();
for (int i = 0; i < bulkRequest.requests.size(); i++) {
ActionRequest request = bulkRequest.requests.get(i);
if (request instanceof IndexRequest) {
IndexRequest indexRequest = (IndexRequest) request;
String concreteIndex = concreteIndices.getConcreteIndex(indexRequest.index()).getName();
ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, indexRequest.id(), indexRequest.routing()).shardId();
List<BulkItemRequest> list = requestsByShard.get(shardId);
if (list == null) {
list = new ArrayList<>();
requestsByShard.put(shardId, list);
}
list.add(new BulkItemRequest(i, request));
} else if (request instanceof DeleteRequest) {
DeleteRequest deleteRequest = (DeleteRequest) request;
String concreteIndex = concreteIndices.getConcreteIndex(deleteRequest.index()).getName();
ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, deleteRequest.id(), deleteRequest.routing()).shardId();
List<BulkItemRequest> list = requestsByShard.get(shardId);
if (list == null) {
list = new ArrayList<>();
requestsByShard.put(shardId, list);
}
list.add(new BulkItemRequest(i, request));
} else if (request instanceof UpdateRequest) {
UpdateRequest updateRequest = (UpdateRequest) request;
String concreteIndex = concreteIndices.getConcreteIndex(updateRequest.index()).getName();
ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, updateRequest.id(), updateRequest.routing()).shardId();
List<BulkItemRequest> list = requestsByShard.get(shardId);
if (list == null) {
list = new ArrayList<>();
requestsByShard.put(shardId, list);
}
list.add(new BulkItemRequest(i, request));
DocWriteRequest request = bulkRequest.requests.get(i);
if (request == null) {
continue;
}
String concreteIndex = concreteIndices.getConcreteIndex(request.index()).getName();
ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, request.id(), request.routing()).shardId();
List<BulkItemRequest> shardRequests = requestsByShard.computeIfAbsent(shardId, shard -> new ArrayList<>());
shardRequests.add(new BulkItemRequest(i, request));
}
if (requestsByShard.isEmpty()) {
@@ -361,19 +300,9 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
// create failures for all relevant requests
for (BulkItemRequest request : requests) {
final String indexName = concreteIndices.getConcreteIndex(request.index()).getName();
if (request.request() instanceof IndexRequest) {
IndexRequest indexRequest = (IndexRequest) request.request();
responses.set(request.id(), new BulkItemResponse(request.id(), indexRequest.opType().toString().toLowerCase(Locale.ENGLISH),
new BulkItemResponse.Failure(indexName, indexRequest.type(), indexRequest.id(), e)));
} else if (request.request() instanceof DeleteRequest) {
DeleteRequest deleteRequest = (DeleteRequest) request.request();
responses.set(request.id(), new BulkItemResponse(request.id(), "delete",
new BulkItemResponse.Failure(indexName, deleteRequest.type(), deleteRequest.id(), e)));
} else if (request.request() instanceof UpdateRequest) {
UpdateRequest updateRequest = (UpdateRequest) request.request();
responses.set(request.id(), new BulkItemResponse(request.id(), "update",
new BulkItemResponse.Failure(indexName, updateRequest.type(), updateRequest.id(), e)));
}
DocWriteRequest docWriteRequest = request.request();
responses.set(request.id(), new BulkItemResponse(request.id(), docWriteRequest.opType(),
new BulkItemResponse.Failure(indexName, docWriteRequest.type(), docWriteRequest.id(), e)));
}
if (counter.decrementAndGet() == 0) {
finishHim();
@@ -387,7 +316,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
}
}
private boolean addFailureIfIndexIsUnavailable(DocumentRequest request, BulkRequest bulkRequest, AtomicArray<BulkItemResponse> responses, int idx,
private boolean addFailureIfIndexIsUnavailable(DocWriteRequest request, BulkRequest bulkRequest, AtomicArray<BulkItemResponse> responses, int idx,
final ConcreteIndices concreteIndices,
final MetaData metaData) {
Index concreteIndex = concreteIndices.getConcreteIndex(request.index());
@@ -410,15 +339,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
if (unavailableException != null) {
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(request.index(), request.type(), request.id(),
unavailableException);
String operationType = "unknown";
if (request instanceof IndexRequest) {
operationType = "index";
} else if (request instanceof DeleteRequest) {
operationType = "delete";
} else if (request instanceof UpdateRequest) {
operationType = "update";
}
BulkItemResponse bulkItemResponse = new BulkItemResponse(idx, operationType, failure);
BulkItemResponse bulkItemResponse = new BulkItemResponse(idx, request.opType(), failure);
responses.set(idx, bulkItemResponse);
// make sure the request never gets processed again
bulkRequest.requests.set(idx, null);
@@ -441,7 +362,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
return indices.get(indexOrAlias);
}
Index resolveIfAbsent(DocumentRequest request) {
Index resolveIfAbsent(DocWriteRequest request) {
Index concreteIndex = indices.get(request.index());
if (concreteIndex == null) {
concreteIndex = indexNameExpressionResolver.concreteSingleIndex(state, request);
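
The shard-grouping loop above collapses three near-identical instanceof branches into one pass over DocWriteRequest, and Map.computeIfAbsent replaces the get/null-check/put sequence. The idiom in isolation:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class GroupByShardSketch {
    public static void main(String[] args) {
        Map<Integer, List<String>> byShard = new HashMap<>();
        byShard.computeIfAbsent(0, shard -> new ArrayList<>()).add("item-1"); // creates the list on first use
        byShard.computeIfAbsent(0, shard -> new ArrayList<>()).add("item-2"); // reuses the existing list
        assert byShard.get(0).size() == 2;
    }
}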

View File

@@ -23,7 +23,8 @@ import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.delete.TransportDeleteAction;
@@ -31,9 +32,8 @@ import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.index.TransportIndexAction;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo;
import org.elasticsearch.action.support.replication.TransportWriteAction;
import org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo;
import org.elasticsearch.action.update.UpdateHelper;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateResponse;
@@ -53,11 +53,9 @@ import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.index.translog.Translog.Location;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportService;
@@ -67,14 +65,9 @@ import java.util.Map;
import static org.elasticsearch.action.support.replication.ReplicationOperation.ignoreReplicaException;
import static org.elasticsearch.action.support.replication.ReplicationOperation.isConflictException;
/**
* Performs the index operation.
*/
/** Performs shard-level bulk (index, delete or update) operations */
public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequest, BulkShardResponse> {
private static final String OP_TYPE_UPDATE = "update";
private static final String OP_TYPE_DELETE = "delete";
public static final String ACTION_NAME = BulkAction.NAME + "[s]";
private final UpdateHelper updateHelper;
@@ -116,8 +109,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
VersionType[] preVersionTypes = new VersionType[request.items().length];
Translog.Location location = null;
for (int requestIndex = 0; requestIndex < request.items().length; requestIndex++) {
BulkItemRequest item = request.items()[requestIndex];
location = handleItem(metaData, request, primary, preVersions, preVersionTypes, location, requestIndex, item);
location = executeBulkItemRequest(metaData, primary, request, preVersions, preVersionTypes, location, requestIndex);
}
BulkItemResponse[] responses = new BulkItemResponse[request.items().length];
@@ -129,205 +121,89 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
return new WriteResult<>(response, location);
}
private Translog.Location handleItem(IndexMetaData metaData, BulkShardRequest request, IndexShard indexShard, long[] preVersions, VersionType[] preVersionTypes, Translog.Location location, int requestIndex, BulkItemRequest item) {
if (item.request() instanceof IndexRequest) {
location = index(metaData, request, indexShard, preVersions, preVersionTypes, location, requestIndex, item);
} else if (item.request() instanceof DeleteRequest) {
location = delete(request, indexShard, preVersions, preVersionTypes, location, requestIndex, item);
} else if (item.request() instanceof UpdateRequest) {
Tuple<Translog.Location, BulkItemRequest> tuple = update(metaData, request, indexShard, preVersions, preVersionTypes, location, requestIndex, item);
location = tuple.v1();
item = tuple.v2();
/** Executes bulk item requests and handles request execution exceptions */
private Translog.Location executeBulkItemRequest(IndexMetaData metaData, IndexShard indexShard,
BulkShardRequest request,
long[] preVersions, VersionType[] preVersionTypes,
Translog.Location location, int requestIndex) {
preVersions[requestIndex] = request.items()[requestIndex].request().version();
preVersionTypes[requestIndex] = request.items()[requestIndex].request().versionType();
DocWriteRequest.OpType opType = request.items()[requestIndex].request().opType();
try {
WriteResult<? extends DocWriteResponse> writeResult = innerExecuteBulkItemRequest(metaData, indexShard,
request, requestIndex);
if (writeResult.getLocation() != null) {
location = locationToSync(location, writeResult.getLocation());
} else {
throw new IllegalStateException("Unexpected index operation: " + item.request());
assert writeResult.getResponse().getResult() == DocWriteResponse.Result.NOOP
: "only noop operation can have null next operation";
}
assert item.getPrimaryResponse() != null;
// update the bulk item request because update request execution can mutate the bulk item request
BulkItemRequest item = request.items()[requestIndex];
// add the response
setResponse(item, new BulkItemResponse(item.id(), opType, writeResult.getResponse()));
} catch (Exception e) {
// rethrow the failure if we are going to retry on the primary and let the parent handle the failure
if (retryPrimaryException(e)) {
// restore updated versions...
for (int j = 0; j < requestIndex; j++) {
DocWriteRequest docWriteRequest = request.items()[j].request();
docWriteRequest.version(preVersions[j]);
docWriteRequest.versionType(preVersionTypes[j]);
}
throw (ElasticsearchException) e;
}
BulkItemRequest item = request.items()[requestIndex];
DocWriteRequest docWriteRequest = item.request();
if (isConflictException(e)) {
logger.trace((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}",
request.shardId(), docWriteRequest.opType().getLowercase(), request), e);
} else {
logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}",
request.shardId(), docWriteRequest.opType().getLowercase(), request), e);
}
// if it's a conflict failure, and we already executed the request on a primary (and we execute it
// again, due to primary relocation and only processing up to N bulk items when the shard gets closed)
// then just use the response we got from the successful execution
if (item.getPrimaryResponse() != null && isConflictException(e)) {
setResponse(item, item.getPrimaryResponse());
} else {
setResponse(item, new BulkItemResponse(item.id(), docWriteRequest.opType(),
new BulkItemResponse.Failure(request.index(), docWriteRequest.type(), docWriteRequest.id(), e)));
}
}
assert request.items()[requestIndex].getPrimaryResponse() != null;
assert preVersionTypes[requestIndex] != null;
return location;
}
private Translog.Location index(IndexMetaData metaData, BulkShardRequest request, IndexShard indexShard, long[] preVersions, VersionType[] preVersionTypes, Translog.Location location, int requestIndex, BulkItemRequest item) {
IndexRequest indexRequest = (IndexRequest) item.request();
preVersions[requestIndex] = indexRequest.version();
preVersionTypes[requestIndex] = indexRequest.versionType();
private WriteResult<? extends DocWriteResponse> innerExecuteBulkItemRequest(IndexMetaData metaData, IndexShard indexShard,
BulkShardRequest request, int requestIndex) throws Exception {
DocWriteRequest itemRequest = request.items()[requestIndex].request();
switch (itemRequest.opType()) {
case CREATE:
case INDEX:
return TransportIndexAction.executeIndexRequestOnPrimary(((IndexRequest) itemRequest), indexShard, mappingUpdatedAction);
case UPDATE:
int maxAttempts = ((UpdateRequest) itemRequest).retryOnConflict();
for (int attemptCount = 0; attemptCount <= maxAttempts; attemptCount++) {
try {
WriteResult<IndexResponse> result = shardIndexOperation(request, indexRequest, metaData, indexShard, true);
location = locationToSync(location, result.getLocation());
// add the response
IndexResponse indexResponse = result.getResponse();
setResponse(item, new BulkItemResponse(item.id(), indexRequest.opType().lowercase(), indexResponse));
return shardUpdateOperation(metaData, indexShard, request, requestIndex, ((UpdateRequest) itemRequest));
} catch (Exception e) {
// rethrow the failure if we are going to retry on the primary and let the parent handle the failure
if (retryPrimaryException(e)) {
// restore updated versions...
for (int j = 0; j < requestIndex; j++) {
applyVersion(request.items()[j], preVersions[j], preVersionTypes[j]);
}
throw (ElasticsearchException) e;
}
logFailure(e, "index", request.shardId(), indexRequest);
// if it's a conflict failure, and we already executed the request on a primary (and we execute it
// again, due to primary relocation and only processing up to N bulk items when the shard gets closed)
// then just use the response we got from the successful execution
if (item.getPrimaryResponse() != null && isConflictException(e)) {
setResponse(item, item.getPrimaryResponse());
} else {
setResponse(item, new BulkItemResponse(item.id(), indexRequest.opType().lowercase(),
new BulkItemResponse.Failure(request.index(), indexRequest.type(), indexRequest.id(), e)));
final Throwable cause = ExceptionsHelper.unwrapCause(e);
if (attemptCount == maxAttempts // bubble up exception when we run out of attempts
|| (cause instanceof VersionConflictEngineException) == false) { // or when exception is not a version conflict
throw e;
}
}
return location;
}
private <ReplicationRequestT extends ReplicationRequest<ReplicationRequestT>> void logFailure(Throwable t, String operation, ShardId shardId, ReplicationRequest<ReplicationRequestT> request) {
if (ExceptionsHelper.status(t) == RestStatus.CONFLICT) {
logger.trace((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", shardId, operation, request), t);
} else {
logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", shardId, operation, request), t);
throw new IllegalStateException("version conflict exception should bubble up on last attempt");
case DELETE:
return TransportDeleteAction.executeDeleteRequestOnPrimary(((DeleteRequest) itemRequest), indexShard);
default: throw new IllegalStateException("unexpected opType [" + itemRequest.opType() + "] found");
}
}
private Translog.Location delete(BulkShardRequest request, IndexShard indexShard, long[] preVersions, VersionType[] preVersionTypes, Translog.Location location, int requestIndex, BulkItemRequest item) {
DeleteRequest deleteRequest = (DeleteRequest) item.request();
preVersions[requestIndex] = deleteRequest.version();
preVersionTypes[requestIndex] = deleteRequest.versionType();
try {
// add the response
final WriteResult<DeleteResponse> writeResult = TransportDeleteAction.executeDeleteRequestOnPrimary(deleteRequest, indexShard);
DeleteResponse deleteResponse = writeResult.getResponse();
location = locationToSync(location, writeResult.getLocation());
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_DELETE, deleteResponse));
} catch (Exception e) {
// rethrow the failure if we are going to retry on the primary and let the parent handle the failure
if (retryPrimaryException(e)) {
// restore updated versions...
for (int j = 0; j < requestIndex; j++) {
applyVersion(request.items()[j], preVersions[j], preVersionTypes[j]);
}
throw (ElasticsearchException) e;
}
logFailure(e, "delete", request.shardId(), deleteRequest);
// if it's a conflict failure, and we already executed the request on a primary (and we execute it
// again, due to primary relocation and only processing up to N bulk items when the shard gets closed)
// then just use the response we got from the successful execution
if (item.getPrimaryResponse() != null && isConflictException(e)) {
setResponse(item, item.getPrimaryResponse());
} else {
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_DELETE,
new BulkItemResponse.Failure(request.index(), deleteRequest.type(), deleteRequest.id(), e)));
}
}
return location;
}
private Tuple<Translog.Location, BulkItemRequest> update(IndexMetaData metaData, BulkShardRequest request, IndexShard indexShard, long[] preVersions, VersionType[] preVersionTypes, Translog.Location location, int requestIndex, BulkItemRequest item) {
UpdateRequest updateRequest = (UpdateRequest) item.request();
preVersions[requestIndex] = updateRequest.version();
preVersionTypes[requestIndex] = updateRequest.versionType();
// We need to do the requested retries plus the initial attempt. We don't do < 1+retry_on_conflict because retry_on_conflict may be Integer.MAX_VALUE
for (int updateAttemptsCount = 0; updateAttemptsCount <= updateRequest.retryOnConflict(); updateAttemptsCount++) {
UpdateResult updateResult;
try {
updateResult = shardUpdateOperation(metaData, request, updateRequest, indexShard);
} catch (Exception t) {
updateResult = new UpdateResult(null, null, false, t, null);
}
if (updateResult.success()) {
if (updateResult.writeResult != null) {
location = locationToSync(location, updateResult.writeResult.getLocation());
}
switch (updateResult.result.getResponseResult()) {
case CREATED:
case UPDATED:
@SuppressWarnings("unchecked")
WriteResult<IndexResponse> result = updateResult.writeResult;
IndexRequest indexRequest = updateResult.request();
BytesReference indexSourceAsBytes = indexRequest.source();
// add the response
IndexResponse indexResponse = result.getResponse();
UpdateResponse updateResponse = new UpdateResponse(indexResponse.getShardInfo(), indexResponse.getShardId(), indexResponse.getType(), indexResponse.getId(), indexResponse.getVersion(), indexResponse.getResult());
if ((updateRequest.fetchSource() != null && updateRequest.fetchSource().fetchSource()) ||
(updateRequest.fields() != null && updateRequest.fields().length > 0)) {
Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(indexSourceAsBytes, true);
updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes));
}
item = request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), indexRequest);
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, updateResponse));
break;
case DELETED:
@SuppressWarnings("unchecked")
WriteResult<DeleteResponse> writeResult = updateResult.writeResult;
DeleteResponse response = writeResult.getResponse();
DeleteRequest deleteRequest = updateResult.request();
updateResponse = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.getResult());
updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), response.getVersion(), updateResult.result.updatedSourceAsMap(), updateResult.result.updateSourceContentType(), null));
// Replace the update request to the translated delete request to execute on the replica.
item = request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), deleteRequest);
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, updateResponse));
break;
case NOOP:
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, updateResult.noopResult));
item.setIgnoreOnReplica(); // no need to go to the replica
break;
default:
throw new IllegalStateException("Illegal operation " + updateResult.result.getResponseResult());
}
// NOTE: Breaking out of the retry_on_conflict loop!
break;
} else if (updateResult.failure()) {
Throwable e = updateResult.error;
if (updateResult.retry) {
// updateAttemptsCount is zero-based and marks the current attempt; once it equals retryOnConflict we stop retrying
if (updateAttemptsCount >= updateRequest.retryOnConflict()) {
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE,
new BulkItemResponse.Failure(request.index(), updateRequest.type(), updateRequest.id(), e)));
}
} else {
// rethrow the failure if we are going to retry on the primary and let the parent failure handler deal with it
if (retryPrimaryException(e)) {
// restore updated versions...
for (int j = 0; j < requestIndex; j++) {
applyVersion(request.items()[j], preVersions[j], preVersionTypes[j]);
}
throw (ElasticsearchException) e;
}
// if it's a conflict failure, and we already executed the request on a primary (and we execute it
// again, due to primary relocation and only processing up to N bulk items when the shard gets closed)
// then just use the response we got from the successful execution
if (item.getPrimaryResponse() != null && isConflictException(e)) {
setResponse(item, item.getPrimaryResponse());
} else if (updateResult.result == null) {
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, new BulkItemResponse.Failure(request.index(), updateRequest.type(), updateRequest.id(), e)));
} else {
switch (updateResult.result.getResponseResult()) {
case CREATED:
case UPDATED:
IndexRequest indexRequest = updateResult.request();
logFailure(e, "index", request.shardId(), indexRequest);
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE,
new BulkItemResponse.Failure(request.index(), indexRequest.type(), indexRequest.id(), e)));
break;
case DELETED:
DeleteRequest deleteRequest = updateResult.request();
logFailure(e, "delete", request.shardId(), deleteRequest);
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_DELETE,
new BulkItemResponse.Failure(request.index(), deleteRequest.type(), deleteRequest.id(), e)));
break;
default:
throw new IllegalStateException("Illegal operation " + updateResult.result.getResponseResult());
}
}
// NOTE: Breaking out of the retry_on_conflict loop!
break;
}
}
}
return Tuple.tuple(location, item);
}
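The loop above implements UpdateRequest's retry-on-conflict contract: on a version conflict the whole get-translate-write cycle is re-run, up to retryOnConflict extra times. A minimal client-side sketch of the knob this services (index, type and field names are illustrative):
UpdateRequest update = new UpdateRequest("my_index", "my_type", "1")
    .doc("counter", 42)     // partial document merged into the existing source
    .retryOnConflict(3);    // one initial attempt plus up to three retries on VersionConflictEngineException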
private void setResponse(BulkItemRequest request, BulkItemResponse response) {
request.setPrimaryResponse(response);
if (response.isFailed()) {
@ -338,105 +214,49 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
}
}
private WriteResult<IndexResponse> shardIndexOperation(BulkShardRequest request, IndexRequest indexRequest, IndexMetaData metaData,
IndexShard indexShard, boolean processed) throws Exception {
MappingMetaData mappingMd = metaData.mappingOrDefault(indexRequest.type());
if (!processed) {
indexRequest.process(mappingMd, allowIdGeneration, request.index());
}
return TransportIndexAction.executeIndexRequestOnPrimary(indexRequest, indexShard, mappingUpdatedAction);
}
static class UpdateResult {
final UpdateHelper.Result result;
final ActionRequest actionRequest;
final boolean retry;
final Throwable error;
final WriteResult writeResult;
final UpdateResponse noopResult;
UpdateResult(UpdateHelper.Result result, ActionRequest actionRequest, boolean retry, Throwable error, WriteResult writeResult) {
this.result = result;
this.actionRequest = actionRequest;
this.retry = retry;
this.error = error;
this.writeResult = writeResult;
this.noopResult = null;
}
UpdateResult(UpdateHelper.Result result, ActionRequest actionRequest, WriteResult writeResult) {
this.result = result;
this.actionRequest = actionRequest;
this.writeResult = writeResult;
this.retry = false;
this.error = null;
this.noopResult = null;
}
public UpdateResult(UpdateHelper.Result result, UpdateResponse updateResponse) {
this.result = result;
this.noopResult = updateResponse;
this.actionRequest = null;
this.writeResult = null;
this.retry = false;
this.error = null;
}
boolean failure() {
return error != null;
}
boolean success() {
return noopResult != null || writeResult != null;
}
@SuppressWarnings("unchecked")
<T extends ActionRequest> T request() {
return (T) actionRequest;
}
}
private UpdateResult shardUpdateOperation(IndexMetaData metaData, BulkShardRequest bulkShardRequest, UpdateRequest updateRequest, IndexShard indexShard) {
/**
* Executes an update request by doing a get and translating it into an index or delete operation.
* NOTE: all operations except NOOP reassign the bulk item request.
*/
private WriteResult<? extends DocWriteResponse> shardUpdateOperation(IndexMetaData metaData, IndexShard indexShard,
BulkShardRequest request,
int requestIndex, UpdateRequest updateRequest)
throws Exception {
// TODO: capture read version conflicts, missing documents and malformed script errors in the write result caused by the get request
UpdateHelper.Result translate = updateHelper.prepare(updateRequest, indexShard, threadPool::estimatedTimeInMillis);
switch (translate.getResponseResult()) {
case CREATED:
case UPDATED:
IndexRequest indexRequest = translate.action();
try {
WriteResult result = shardIndexOperation(bulkShardRequest, indexRequest, metaData, indexShard, false);
return new UpdateResult(translate, indexRequest, result);
} catch (Exception e) {
final Throwable cause = ExceptionsHelper.unwrapCause(e);
boolean retry = false;
if (cause instanceof VersionConflictEngineException) {
retry = true;
}
return new UpdateResult(translate, indexRequest, retry, cause, null);
MappingMetaData mappingMd = metaData.mappingOrDefault(indexRequest.type());
indexRequest.process(mappingMd, allowIdGeneration, request.index());
WriteResult<IndexResponse> writeResult = TransportIndexAction.executeIndexRequestOnPrimary(indexRequest, indexShard, mappingUpdatedAction);
BytesReference indexSourceAsBytes = indexRequest.source();
IndexResponse indexResponse = writeResult.getResponse();
UpdateResponse update = new UpdateResponse(indexResponse.getShardInfo(), indexResponse.getShardId(), indexResponse.getType(), indexResponse.getId(), indexResponse.getVersion(), indexResponse.getResult());
if ((updateRequest.fetchSource() != null && updateRequest.fetchSource().fetchSource()) ||
(updateRequest.fields() != null && updateRequest.fields().length > 0)) {
Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(indexSourceAsBytes, true);
update.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes));
}
// Replace the update request with the translated index request to execute on the replica.
request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), indexRequest);
return new WriteResult<>(update, writeResult.getLocation());
case DELETED:
DeleteRequest deleteRequest = translate.action();
try {
WriteResult<DeleteResponse> result = TransportDeleteAction.executeDeleteRequestOnPrimary(deleteRequest, indexShard);
return new UpdateResult(translate, deleteRequest, result);
} catch (Exception e) {
final Throwable cause = ExceptionsHelper.unwrapCause(e);
boolean retry = false;
if (cause instanceof VersionConflictEngineException) {
retry = true;
}
return new UpdateResult(translate, deleteRequest, retry, cause, null);
}
WriteResult<DeleteResponse> deleteResult = TransportDeleteAction.executeDeleteRequestOnPrimary(deleteRequest, indexShard);
DeleteResponse response = deleteResult.getResponse();
UpdateResponse deleteUpdateResponse = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.getResult());
deleteUpdateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), response.getVersion(), translate.updatedSourceAsMap(), translate.updateSourceContentType(), null));
// Replace the update request with the translated delete request to execute on the replica.
request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), deleteRequest);
return new WriteResult<>(deleteUpdateResponse, deleteResult.getLocation());
case NOOP:
UpdateResponse updateResponse = translate.action();
BulkItemRequest item = request.items()[requestIndex];
indexShard.noopUpdate(updateRequest.type());
return new UpdateResult(translate, updateResponse);
default:
throw new IllegalStateException("Illegal update operation " + translate.getResponseResult());
item.setIgnoreOnReplica(); // no need to go to the replica
return new WriteResult<>(translate.action(), null);
default: throw new IllegalStateException("Illegal update operation " + translate.getResponseResult());
}
}
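For orientation, a hedged sketch of how the translate outcomes handled above surface in the client API (names and values are illustrative; the DELETED branch is reached by a scripted update setting ctx.op to "delete"):
UpdateRequest req = new UpdateRequest("my_index", "my_type", "1")
    .doc("field", "new_value")     // doc exists and changes -> UPDATED (translated to an IndexRequest)
    .upsert("field", "new_value")  // doc missing            -> CREATED (the upsert becomes the IndexRequest)
    .detectNoop(true);             // doc exists, no change  -> NOOP (no write; indexShard.noopUpdate(...) is counted)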
@ -448,10 +268,20 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
if (item == null || item.isIgnoreOnReplica()) {
continue;
}
if (item.request() instanceof IndexRequest) {
IndexRequest indexRequest = (IndexRequest) item.request();
DocWriteRequest docWriteRequest = item.request();
final Engine.Operation operation;
try {
Engine.Index operation = TransportIndexAction.executeIndexRequestOnReplica(indexRequest, indexShard);
switch (docWriteRequest.opType()) {
case CREATE:
case INDEX:
operation = TransportIndexAction.executeIndexRequestOnReplica(((IndexRequest) docWriteRequest), indexShard);
break;
case DELETE:
operation = TransportDeleteAction.executeDeleteRequestOnReplica(((DeleteRequest) docWriteRequest), indexShard);
break;
default: throw new IllegalStateException("Unexpected request operation type on replica: "
+ docWriteRequest.opType().getLowercase());
}
location = locationToSync(location, operation.getTranslogLocation());
} catch (Exception e) {
// if it's not an ignorable replica failure, we need to make sure to bubble up the failure
@ -460,38 +290,10 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
throw e;
}
}
} else if (item.request() instanceof DeleteRequest) {
DeleteRequest deleteRequest = (DeleteRequest) item.request();
try {
Engine.Delete delete = TransportDeleteAction.executeDeleteRequestOnReplica(deleteRequest, indexShard);
indexShard.delete(delete);
location = locationToSync(location, delete.getTranslogLocation());
} catch (Exception e) {
// if it's not an ignorable replica failure, we need to make sure to bubble up the failure
// so we will fail the shard
if (!ignoreReplicaException(e)) {
throw e;
}
}
} else {
throw new IllegalStateException("Unexpected index operation: " + item.request());
}
}
return location;
}
private void applyVersion(BulkItemRequest item, long version, VersionType versionType) {
if (item.request() instanceof IndexRequest) {
((IndexRequest) item.request()).version(version).versionType(versionType);
} else if (item.request() instanceof DeleteRequest) {
((DeleteRequest) item.request()).version(version).versionType(versionType);
} else if (item.request() instanceof UpdateRequest) {
((UpdateRequest) item.request()).version(version).versionType(versionType);
} else {
// log?
}
}
private Translog.Location locationToSync(Translog.Location current, Translog.Location next) {
/* here we are moving forward in the translog with each operation. Under the hood
* this might cross translog files which is ok since from the user perspective
View File
@ -20,7 +20,7 @@
package org.elasticsearch.action.delete;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.DocumentRequest;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.support.replication.ReplicatedWriteRequest;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
@ -43,7 +43,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
* @see org.elasticsearch.client.Client#delete(DeleteRequest)
* @see org.elasticsearch.client.Requests#deleteRequest(String)
*/
public class DeleteRequest extends ReplicatedWriteRequest<DeleteRequest> implements DocumentRequest<DeleteRequest> {
public class DeleteRequest extends ReplicatedWriteRequest<DeleteRequest> implements DocWriteRequest<DeleteRequest> {
private String type;
private String id;
@ -164,28 +164,33 @@ public class DeleteRequest extends ReplicatedWriteRequest<DeleteRequest> impleme
return this.routing;
}
/**
* Sets the version, which will cause the delete operation to only be performed if a matching
* version exists and no changes happened on the doc since then.
*/
@Override
public DeleteRequest version(long version) {
this.version = version;
return this;
}
@Override
public long version() {
return this.version;
}
@Override
public DeleteRequest versionType(VersionType versionType) {
this.versionType = versionType;
return this;
}
@Override
public VersionType versionType() {
return this.versionType;
}
@Override
public OpType opType() {
return OpType.DELETE;
}
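A short sketch of the versioned-delete contract documented above (identifiers and the version value are illustrative):
DeleteRequest delete = new DeleteRequest("my_index", "my_type", "1")
    .version(5L)                         // only delete if the doc's current version is 5
    .versionType(VersionType.INTERNAL);  // the default; EXTERNAL instead compares a caller-supplied version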
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
View File
@ -27,6 +27,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.search.internal.AliasFilter;
import java.io.IOException;
@ -43,7 +44,7 @@ public class ExplainRequest extends SingleShardRequest<ExplainRequest> {
private String[] storedFields;
private FetchSourceContext fetchSourceContext;
private String[] filteringAlias = Strings.EMPTY_ARRAY;
private AliasFilter filteringAlias = new AliasFilter(null, Strings.EMPTY_ARRAY);
long nowInMillis;
@ -131,11 +132,11 @@ public class ExplainRequest extends SingleShardRequest<ExplainRequest> {
return this;
}
public String[] filteringAlias() {
public AliasFilter filteringAlias() {
return filteringAlias;
}
public ExplainRequest filteringAlias(String[] filteringAlias) {
public ExplainRequest filteringAlias(AliasFilter filteringAlias) {
if (filteringAlias != null) {
this.filteringAlias = filteringAlias;
}
@ -166,7 +167,7 @@ public class ExplainRequest extends SingleShardRequest<ExplainRequest> {
routing = in.readOptionalString();
preference = in.readOptionalString();
query = in.readNamedWriteable(QueryBuilder.class);
filteringAlias = in.readStringArray();
filteringAlias = new AliasFilter(in);
storedFields = in.readOptionalStringArray();
fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new);
nowInMillis = in.readVLong();
@ -180,7 +181,7 @@ public class ExplainRequest extends SingleShardRequest<ExplainRequest> {
out.writeOptionalString(routing);
out.writeOptionalString(preference);
out.writeNamedWriteable(query);
out.writeStringArray(filteringAlias);
filteringAlias.writeTo(out);
out.writeOptionalStringArray(storedFields);
out.writeOptionalWriteable(fetchSourceContext);
out.writeVLong(nowInMillis);
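Moving from writeStringArray to the Writeable pattern means AliasFilter now owns its own serialization; a hedged round-trip sketch mirroring the readFrom/writeTo changes above:
BytesStreamOutput out = new BytesStreamOutput();
request.filteringAlias().writeTo(out);                          // as in writeTo(StreamOutput) above
AliasFilter copy = new AliasFilter(out.bytes().streamInput());  // as in readFrom(StreamInput) above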
View File
@ -39,6 +39,7 @@ import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.UidFieldMapper;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.ShardSearchLocalRequest;
import org.elasticsearch.search.rescore.RescoreSearchContext;
@ -78,7 +79,9 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe
@Override
protected void resolveRequest(ClusterState state, InternalRequest request) {
request.request().filteringAlias(indexNameExpressionResolver.filteringAliases(state, request.concreteIndex(), request.request().index()));
final AliasFilter aliasFilter = searchService.buildAliasFilter(state, request.concreteIndex(),
request.request().index());
request.request().filteringAlias(aliasFilter);
// Fail fast on the node that received the request.
if (request.request().routing() == null && state.getMetaData().routingRequired(request.concreteIndex(), request.request().type())) {
throw new RoutingMissingException(request.concreteIndex(), request.request().type(), request.request().id());
@ -86,7 +89,7 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe
}
@Override
protected ExplainResponse shardOperation(ExplainRequest request, ShardId shardId) {
protected ExplainResponse shardOperation(ExplainRequest request, ShardId shardId) throws IOException {
ShardSearchLocalRequest shardSearchLocalRequest = new ShardSearchLocalRequest(shardId,
new String[]{request.type()}, request.nowInMillis, request.filteringAlias());
SearchContext context = searchService.createSearchContext(shardSearchLocalRequest, SearchService.NO_TIMEOUT, null);
View File
@ -21,7 +21,7 @@ package org.elasticsearch.action.index;
import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.DocumentRequest;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.RoutingMissingException;
import org.elasticsearch.action.TimestampParsingException;
import org.elasticsearch.action.support.replication.ReplicatedWriteRequest;
@ -67,68 +67,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
* @see org.elasticsearch.client.Requests#indexRequest(String)
* @see org.elasticsearch.client.Client#index(IndexRequest)
*/
public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implements DocumentRequest<IndexRequest> {
/**
* Operation type controls the type of the index operation.
*/
public enum OpType {
/**
* Index the source. If there is an existing document with the id, it will
* be replaced.
*/
INDEX((byte) 0),
/**
* Creates the resource. Simply adds it to the index; if there is an existing
* document with the id, the operation will fail.
*/
CREATE((byte) 1);
private final byte id;
private final String lowercase;
OpType(byte id) {
this.id = id;
this.lowercase = this.toString().toLowerCase(Locale.ENGLISH);
}
/**
* The internal representation of the operation type.
*/
public byte id() {
return id;
}
public String lowercase() {
return this.lowercase;
}
/**
* Constructs the operation type from its internal representation.
*/
public static OpType fromId(byte id) {
if (id == 0) {
return INDEX;
} else if (id == 1) {
return CREATE;
} else {
throw new IllegalArgumentException("No type match for [" + id + "]");
}
}
public static OpType fromString(String sOpType) {
String lowersOpType = sOpType.toLowerCase(Locale.ROOT);
switch (lowersOpType) {
case "create":
return OpType.CREATE;
case "index":
return OpType.INDEX;
default:
throw new IllegalArgumentException("opType [" + sOpType + "] not allowed, either [index] or [create] are allowed");
}
}
}
public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implements DocWriteRequest<IndexRequest> {
private String type;
private String id;
@ -526,6 +465,9 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
* Sets the type of operation to perform.
*/
public IndexRequest opType(OpType opType) {
if (opType != OpType.CREATE && opType != OpType.INDEX) {
throw new IllegalArgumentException("opType must be 'create' or 'index', found: [" + opType + "]");
}
this.opType = opType;
if (opType == OpType.CREATE) {
version(Versions.MATCH_DELETED);
@ -535,11 +477,19 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
}
/**
* Sets a string representation of the {@link #opType(org.elasticsearch.action.index.IndexRequest.OpType)}. Can
* Sets a string representation of the {@link #opType(OpType)}. Can
* be either "index" or "create".
*/
public IndexRequest opType(String opType) {
return opType(OpType.fromString(opType));
String op = opType.toLowerCase(Locale.ROOT);
if (op.equals("create")) {
opType(OpType.CREATE);
} else if (op.equals("index")) {
opType(OpType.INDEX);
} else {
throw new IllegalArgumentException("opType must be 'create' or 'index', found: [" + opType + "]");
}
return this;
}
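With the validation added above, an IndexRequest accepts only the two single-document op types; the other DocWriteRequest.OpType values are rejected. A sketch (index, type, id and field are illustrative):
IndexRequest index = new IndexRequest("my_index", "my_type", "1")
    .source("field", "value")
    .opType(DocWriteRequest.OpType.CREATE);  // accepted; as above, CREATE also pins version to MATCH_DELETED
// index.opType(DocWriteRequest.OpType.DELETE) would throw IllegalArgumentException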
@ -554,34 +504,29 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
}
}
/**
* The type of operation to perform.
*/
@Override
public OpType opType() {
return this.opType;
}
/**
* Sets the version, which will cause the index operation to only be performed if a matching
* version exists and no changes happened on the doc since then.
*/
@Override
public IndexRequest version(long version) {
this.version = version;
return this;
}
@Override
public long version() {
return this.version;
}
/**
* Sets the versioning type. Defaults to {@link VersionType#INTERNAL}.
*/
@Override
public IndexRequest versionType(VersionType versionType) {
this.versionType = versionType;
return this;
}
@Override
public VersionType versionType() {
return this.versionType;
}
@ -673,7 +618,7 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
out.writeOptionalString(timestamp);
out.writeOptionalWriteable(ttl);
out.writeBytesReference(source);
out.writeByte(opType.id());
out.writeByte(opType.getId());
out.writeLong(version);
out.writeByte(versionType.getValue());
out.writeOptionalString(pipeline);
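The byte written above stays wire-compatible because DocWriteRequest.OpType reuses the old ids (INDEX = 0, CREATE = 1); assuming the usual fromId counterpart on the new enum:
byte wireId = DocWriteRequest.OpType.CREATE.getId();               // 1, same as the removed IndexRequest.OpType.CREATE
DocWriteRequest.OpType op = DocWriteRequest.OpType.fromId(wireId); // round-trips to CREATE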
View File
@ -19,6 +19,7 @@
package org.elasticsearch.action.index;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.support.WriteRequestBuilder;
import org.elasticsearch.action.support.replication.ReplicationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
@ -200,7 +201,7 @@ public class IndexRequestBuilder extends ReplicationRequestBuilder<IndexRequest,
/**
* Sets the type of operation to perform.
*/
public IndexRequestBuilder setOpType(IndexRequest.OpType opType) {
public IndexRequestBuilder setOpType(DocWriteRequest.OpType opType) {
request.opType(opType);
return this;
}
View File
@ -24,6 +24,7 @@ import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.bulk.BulkAction;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkRequest;
@ -134,7 +135,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
return Integer.MAX_VALUE;
}
static final class BulkRequestModifier implements Iterator<ActionRequest<?>> {
static final class BulkRequestModifier implements Iterator<DocWriteRequest> {
final BulkRequest bulkRequest;
final Set<Integer> failedSlots;
@ -150,7 +151,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
}
@Override
public ActionRequest next() {
public DocWriteRequest next() {
return bulkRequest.requests().get(++currentSlot);
}
@ -171,7 +172,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
int slot = 0;
originalSlots = new int[bulkRequest.requests().size() - failedSlots.size()];
for (int i = 0; i < bulkRequest.requests().size(); i++) {
ActionRequest request = bulkRequest.requests().get(i);
DocWriteRequest request = bulkRequest.requests().get(i);
if (failedSlots.contains(i) == false) {
modifiedBulkRequest.add(request);
originalSlots[slot++] = i;
@ -207,7 +208,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
// 3) Continue with the next request in the bulk.
failedSlots.add(currentSlot);
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(indexRequest.index(), indexRequest.type(), indexRequest.id(), e);
itemResponses.add(new BulkItemResponse(currentSlot, indexRequest.opType().lowercase(), failure));
itemResponses.add(new BulkItemResponse(currentSlot, indexRequest.opType(), failure));
}
}
View File
@ -26,8 +26,10 @@ abstract class AbstractAsyncAction {
private final long startTime;
protected AbstractAsyncAction() {
this.startTime = System.currentTimeMillis();
protected AbstractAsyncAction() { this(System.currentTimeMillis()); }
protected AbstractAsyncAction(long startTime) {
this.startTime = startTime;
}
/**
View File
@ -27,104 +27,71 @@ import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.NoShardAvailableActionException;
import org.elasticsearch.action.support.TransportActions;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.query.QuerySearchResultProvider;
import org.elasticsearch.threadpool.ThreadPool;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
import static org.elasticsearch.action.search.TransportSearchHelper.internalSearchRequest;
abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult> extends AbstractAsyncAction {
protected final Logger logger;
protected final SearchTransportService searchTransportService;
private final IndexNameExpressionResolver indexNameExpressionResolver;
protected final SearchPhaseController searchPhaseController;
protected final ThreadPool threadPool;
private final Executor executor;
protected final ActionListener<SearchResponse> listener;
protected final GroupShardsIterator shardsIts;
private final GroupShardsIterator shardsIts;
protected final SearchRequest request;
protected final ClusterState clusterState;
protected final DiscoveryNodes nodes;
/** Used by subclasses to resolve node ids to DiscoveryNodes. **/
protected final Function<String, DiscoveryNode> nodeIdToDiscoveryNode;
protected final int expectedSuccessfulOps;
private final int expectedTotalOps;
protected final AtomicInteger successfulOps = new AtomicInteger();
private final AtomicInteger totalOps = new AtomicInteger();
protected final AtomicArray<FirstResult> firstResults;
private final Map<String, AliasFilter> aliasFilter;
private final long clusterStateVersion;
private volatile AtomicArray<ShardSearchFailure> shardFailures;
private final Object shardFailuresMutex = new Object();
protected volatile ScoreDoc[] sortedShardDocs;
protected AbstractSearchAsyncAction(Logger logger, SearchTransportService searchTransportService, ClusterService clusterService,
IndexNameExpressionResolver indexNameExpressionResolver,
SearchPhaseController searchPhaseController, ThreadPool threadPool, SearchRequest request,
ActionListener<SearchResponse> listener) {
protected AbstractSearchAsyncAction(Logger logger, SearchTransportService searchTransportService,
Function<String, DiscoveryNode> nodeIdToDiscoveryNode,
Map<String, AliasFilter> aliasFilter, Executor executor, SearchRequest request,
ActionListener<SearchResponse> listener, GroupShardsIterator shardsIts, long startTime,
long clusterStateVersion) {
super(startTime);
this.logger = logger;
this.searchTransportService = searchTransportService;
this.indexNameExpressionResolver = indexNameExpressionResolver;
this.searchPhaseController = searchPhaseController;
this.threadPool = threadPool;
this.executor = executor;
this.request = request;
this.listener = listener;
this.clusterState = clusterService.state();
nodes = clusterState.nodes();
clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ);
// TODO: I think startTime() should become part of ActionRequest and that should be used both for index name
// date math expressions and $now in scripts. This way all apis will deal with now in the same way instead
// of just for the _search api
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request.indicesOptions(),
startTime(), request.indices());
for (String index : concreteIndices) {
clusterState.blocks().indexBlockedRaiseException(ClusterBlockLevel.READ, index);
}
Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, request.routing(),
request.indices());
shardsIts = clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, request.preference());
final int shardCount = shardsIts.size();
failIfOverShardCountLimit(clusterService, shardCount);
expectedSuccessfulOps = shardCount;
this.nodeIdToDiscoveryNode = nodeIdToDiscoveryNode;
this.clusterStateVersion = clusterStateVersion;
this.shardsIts = shardsIts;
expectedSuccessfulOps = shardsIts.size();
// we need to add 1 for each non-active partition, since we count it in the total!
expectedTotalOps = shardsIts.totalSizeWith1ForEmpty();
firstResults = new AtomicArray<>(shardsIts.size());
this.aliasFilter = aliasFilter;
}
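Swapping ClusterService for a plain Function<String, DiscoveryNode> decouples the async action from cluster infrastructure, so any lookup can be injected. A sketch (clusterState and localNode are assumed to be in scope):
// production wiring, as in TransportSearchAction: resolve against the current cluster state
Function<String, DiscoveryNode> nodesLookup = clusterState.nodes()::get;
// test wiring: a fixed map, no ClusterService required
Function<String, DiscoveryNode> testLookup = Collections.singletonMap("node_1", localNode)::get;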
private void failIfOverShardCountLimit(ClusterService clusterService, int shardCount) {
final long shardCountLimit = clusterService.getClusterSettings().get(TransportSearchAction.SHARD_COUNT_LIMIT_SETTING);
if (shardCount > shardCountLimit) {
throw new IllegalArgumentException("Trying to query " + shardCount + " shards, which is over the limit of "
+ shardCountLimit + ". This limit exists because querying many shards at the same time can make the "
+ "job of the coordinating node very CPU and/or memory intensive. It is usually a better idea to "
+ "have a smaller number of larger shards. Update [" + TransportSearchAction.SHARD_COUNT_LIMIT_SETTING.getKey()
+ "] to a greater value if you really want to query that many shards at the same time.");
}
}
public void start() {
if (expectedSuccessfulOps == 0) {
@ -152,14 +119,14 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
// no more active shards... (we should not really get here, but just for safety)
onFirstPhaseResult(shardIndex, null, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
} else {
final DiscoveryNode node = nodes.get(shard.currentNodeId());
final DiscoveryNode node = nodeIdToDiscoveryNode.apply(shard.currentNodeId());
if (node == null) {
onFirstPhaseResult(shardIndex, shard, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
} else {
String[] filteringAliases = indexNameExpressionResolver.filteringAliases(clusterState,
shard.index().getName(), request.indices());
sendExecuteFirstPhase(node, internalSearchRequest(shard, shardsIts.size(), request, filteringAliases,
startTime()), new ActionListener<FirstResult>() {
AliasFilter filter = this.aliasFilter.get(shard.index().getName());
ShardSearchTransportRequest transportRequest = new ShardSearchTransportRequest(request, shard, shardsIts.size(),
filter, startTime());
sendExecuteFirstPhase(node, transportRequest, new ActionListener<FirstResult>() {
@Override
public void onResponse(FirstResult result) {
onFirstPhaseResult(shardIndex, shard, result, shardIt);
@ -319,7 +286,7 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
private void raiseEarlyFailure(Exception e) {
for (AtomicArray.Entry<FirstResult> entry : firstResults.asList()) {
try {
DiscoveryNode node = nodes.get(entry.value.shardTarget().nodeId());
DiscoveryNode node = nodeIdToDiscoveryNode.apply(entry.value.shardTarget().nodeId());
sendReleaseSearchContext(entry.value.id(), node);
} catch (Exception inner) {
inner.addSuppressed(e);
@ -344,7 +311,7 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
if (queryResult.hasHits()
&& docIdsToLoad.get(entry.index) == null) { // but none of them made it to the global top docs
try {
DiscoveryNode node = nodes.get(entry.value.queryResult().shardTarget().nodeId());
DiscoveryNode node = nodeIdToDiscoveryNode.apply(entry.value.queryResult().shardTarget().nodeId());
sendReleaseSearchContext(entry.value.queryResult().id(), node);
} catch (Exception e) {
logger.trace("failed to release context", e);
@ -402,7 +369,7 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
sb.append(result.shardTarget());
}
logger.trace("Moving to second phase, based on results from: {} (cluster state version: {})", sb, clusterState.version());
logger.trace("Moving to second phase, based on results from: {} (cluster state version: {})", sb, clusterStateVersion);
}
moveToSecondPhase();
}
@ -410,4 +377,9 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
protected abstract void moveToSecondPhase() throws Exception;
protected abstract String firstPhaseName();
protected Executor getExecutor() {
return executor;
}
}
View File
@ -24,31 +24,35 @@ import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.dfs.AggregatedDfs;
import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.search.query.QuerySearchRequest;
import org.elasticsearch.threadpool.ThreadPool;
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<DfsSearchResult> {
private final AtomicArray<QueryFetchSearchResult> queryFetchResults;
private final SearchPhaseController searchPhaseController;
SearchDfsQueryAndFetchAsyncAction(Logger logger, SearchTransportService searchTransportService,
ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
SearchPhaseController searchPhaseController, ThreadPool threadPool,
SearchRequest request, ActionListener<SearchResponse> listener) {
super(logger, searchTransportService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool,
request, listener);
Function<String, DiscoveryNode> nodeIdToDiscoveryNode,
Map<String, AliasFilter> aliasFilter, SearchPhaseController searchPhaseController,
Executor executor, SearchRequest request, ActionListener<SearchResponse> listener,
GroupShardsIterator shardsIts, long startTime, long clusterStateVersion) {
super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, executor,
request, listener, shardsIts, startTime, clusterStateVersion);
this.searchPhaseController = searchPhaseController;
queryFetchResults = new AtomicArray<>(firstResults.length());
}
@ -70,7 +74,7 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<DfsSea
for (final AtomicArray.Entry<DfsSearchResult> entry : firstResults.asList()) {
DfsSearchResult dfsResult = entry.value;
DiscoveryNode node = nodes.get(dfsResult.shardTarget().nodeId());
DiscoveryNode node = nodeIdToDiscoveryNode.apply(dfsResult.shardTarget().nodeId());
QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs);
executeSecondPhase(entry.index, dfsResult, counter, node, querySearchRequest);
}
@ -115,7 +119,7 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<DfsSea
}
private void finishHim() {
threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable<SearchResponse>(listener) {
getExecutor().execute(new ActionRunnable<SearchResponse>(listener) {
@Override
public void doRun() throws IOException {
sortedShardDocs = searchPhaseController.sortDocs(true, queryFetchResults);
View File
@ -26,36 +26,41 @@ import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.dfs.AggregatedDfs;
import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.search.fetch.FetchSearchResult;
import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.search.query.QuerySearchRequest;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.threadpool.ThreadPool;
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSearchResult> {
final AtomicArray<QuerySearchResult> queryResults;
final AtomicArray<FetchSearchResult> fetchResults;
final AtomicArray<IntArrayList> docIdsToLoad;
private final SearchPhaseController searchPhaseController;
SearchDfsQueryThenFetchAsyncAction(Logger logger, SearchTransportService searchTransportService,
ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
SearchPhaseController searchPhaseController, ThreadPool threadPool,
SearchRequest request, ActionListener<SearchResponse> listener) {
super(logger, searchTransportService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool,
request, listener);
Function<String, DiscoveryNode> nodeIdToDiscoveryNode,
Map<String, AliasFilter> aliasFilter, SearchPhaseController searchPhaseController,
Executor executor, SearchRequest request, ActionListener<SearchResponse> listener,
GroupShardsIterator shardsIts, long startTime, long clusterStateVersion) {
super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, executor,
request, listener, shardsIts, startTime, clusterStateVersion);
this.searchPhaseController = searchPhaseController;
queryResults = new AtomicArray<>(firstResults.length());
fetchResults = new AtomicArray<>(firstResults.length());
docIdsToLoad = new AtomicArray<>(firstResults.length());
@ -78,7 +83,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSe
final AtomicInteger counter = new AtomicInteger(firstResults.asList().size());
for (final AtomicArray.Entry<DfsSearchResult> entry : firstResults.asList()) {
DfsSearchResult dfsResult = entry.value;
DiscoveryNode node = nodes.get(dfsResult.shardTarget().nodeId());
DiscoveryNode node = nodeIdToDiscoveryNode.apply(dfsResult.shardTarget().nodeId());
QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs);
executeQuery(entry.index, dfsResult, counter, querySearchRequest, node);
}
@ -149,7 +154,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSe
final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size());
for (final AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
QuerySearchResult queryResult = queryResults.get(entry.index);
DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId());
DiscoveryNode node = nodeIdToDiscoveryNode.apply(queryResult.shardTarget().nodeId());
ShardFetchSearchRequest fetchSearchRequest = createFetchRequest(queryResult, entry, lastEmittedDocPerShard);
executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node);
}
@ -192,7 +197,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSe
}
private void finishHim() {
threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable<SearchResponse>(listener) {
getExecutor().execute(new ActionRunnable<SearchResponse>(listener) {
@Override
public void doRun() throws IOException {
final boolean isScrollRequest = request.scroll() != null;
View File
@ -22,24 +22,32 @@ package org.elasticsearch.action.search;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.threadpool.ThreadPool;
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.Executor;
import java.util.function.Function;
class SearchQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<QueryFetchSearchResult> {
private final SearchPhaseController searchPhaseController;
SearchQueryAndFetchAsyncAction(Logger logger, SearchTransportService searchTransportService,
ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
SearchPhaseController searchPhaseController, ThreadPool threadPool,
SearchRequest request, ActionListener<SearchResponse> listener) {
super(logger, searchTransportService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool,
request, listener);
Function<String, DiscoveryNode> nodeIdToDiscoveryNode,
Map<String, AliasFilter> aliasFilter,
SearchPhaseController searchPhaseController, Executor executor,
SearchRequest request, ActionListener<SearchResponse> listener,
GroupShardsIterator shardsIts, long startTime, long clusterStateVersion) {
super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, executor,
request, listener, shardsIts, startTime, clusterStateVersion);
this.searchPhaseController = searchPhaseController;
}
@Override
@ -55,7 +63,7 @@ class SearchQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<QueryFetc
@Override
protected void moveToSecondPhase() throws Exception {
threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable<SearchResponse>(listener) {
getExecutor().execute(new ActionRunnable<SearchResponse>(listener) {
@Override
public void doRun() throws IOException {
final boolean isScrollRequest = request.scroll() != null;
View File
@ -26,31 +26,38 @@ import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.fetch.FetchSearchResult;
import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.search.query.QuerySearchResultProvider;
import org.elasticsearch.threadpool.ThreadPool;
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<QuerySearchResultProvider> {
final AtomicArray<FetchSearchResult> fetchResults;
final AtomicArray<IntArrayList> docIdsToLoad;
private final SearchPhaseController searchPhaseController;
SearchQueryThenFetchAsyncAction(Logger logger, SearchTransportService searchService,
ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
SearchPhaseController searchPhaseController, ThreadPool threadPool,
SearchRequest request, ActionListener<SearchResponse> listener) {
super(logger, searchService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool, request, listener);
SearchQueryThenFetchAsyncAction(Logger logger, SearchTransportService searchTransportService,
Function<String, DiscoveryNode> nodeIdToDiscoveryNode,
Map<String, AliasFilter> aliasFilter,
SearchPhaseController searchPhaseController, Executor executor,
SearchRequest request, ActionListener<SearchResponse> listener,
GroupShardsIterator shardsIts, long startTime, long clusterStateVersion) {
super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, executor, request, listener,
shardsIts, startTime, clusterStateVersion);
this.searchPhaseController = searchPhaseController;
fetchResults = new AtomicArray<>(firstResults.length());
docIdsToLoad = new AtomicArray<>(firstResults.length());
}
@ -82,7 +89,7 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<QuerySea
final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size());
for (AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
QuerySearchResultProvider queryResult = firstResults.get(entry.index);
DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId());
DiscoveryNode node = nodeIdToDiscoveryNode.apply(queryResult.shardTarget().nodeId());
ShardFetchSearchRequest fetchSearchRequest = createFetchRequest(queryResult.queryResult(), entry, lastEmittedDocPerShard);
executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node);
}
@ -125,7 +132,7 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<QuerySea
}
private void finishHim() {
threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable<SearchResponse>(listener) {
getExecutor().execute(new ActionRunnable<SearchResponse>(listener) {
@Override
public void doRun() throws IOException {
final boolean isScrollRequest = request.scroll() != null;
View File
@ -51,7 +51,7 @@ public class TransportMultiSearchAction extends HandledTransportAction<MultiSear
super(settings, MultiSearchAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, MultiSearchRequest::new);
this.clusterService = clusterService;
this.searchAction = searchAction;
this.availableProcessors = EsExecutors.boundedNumberOfProcessors(settings);
this.availableProcessors = EsExecutors.numberOfProcessors(settings);
}
// For testing only:
View File
@ -23,7 +23,10 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Setting;
@ -34,11 +37,16 @@ import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.indices.IndexClosedException;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Executor;
import java.util.function.Function;
import static org.elasticsearch.action.search.SearchType.QUERY_AND_FETCH;
import static org.elasticsearch.action.search.SearchType.QUERY_THEN_FETCH;
@ -52,6 +60,7 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
private final ClusterService clusterService;
private final SearchTransportService searchTransportService;
private final SearchPhaseController searchPhaseController;
private final SearchService searchService;
@Inject
public TransportSearchAction(Settings settings, ThreadPool threadPool, BigArrays bigArrays, ScriptService scriptService,
@ -63,18 +72,43 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
this.searchTransportService = new SearchTransportService(settings, transportService);
SearchTransportService.registerRequestHandler(transportService, searchService);
this.clusterService = clusterService;
this.searchService = searchService;
}
private Map<String, AliasFilter> buildPerIndexAliasFilter(SearchRequest request, ClusterState clusterState, String... concreteIndices) {
final Map<String, AliasFilter> aliasFilterMap = new HashMap<>();
for (String index : concreteIndices) {
clusterState.blocks().indexBlockedRaiseException(ClusterBlockLevel.READ, index);
AliasFilter aliasFilter = searchService.buildAliasFilter(clusterState, index, request.indices());
if (aliasFilter != null) {
aliasFilterMap.put(index, aliasFilter);
}
}
return aliasFilterMap;
}
@Override
protected void doExecute(SearchRequest searchRequest, ActionListener<SearchResponse> listener) {
// pure paranoia: if time goes backwards we at least stay positive
final long startTimeInMillis = Math.max(0, System.currentTimeMillis());
ClusterState clusterState = clusterService.state();
clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ);
// TODO: I think startTime() should become part of ActionRequest and that should be used both for index name
// date math expressions and $now in scripts. This way all apis will deal with now in the same way instead
// of just for the _search api
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, searchRequest.indicesOptions(),
startTimeInMillis, searchRequest.indices());
Map<String, AliasFilter> aliasFilter = buildPerIndexAliasFilter(searchRequest, clusterState, concreteIndices);
Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, searchRequest.routing(),
searchRequest.indices());
GroupShardsIterator shardIterators = clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap,
searchRequest.preference());
failIfOverShardCountLimit(clusterService, shardIterators.size());
// optimize search type for cases where there is only one shard group to search on
try {
ClusterState clusterState = clusterService.state();
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, searchRequest);
Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState,
searchRequest.routing(), searchRequest.indices());
int shardCount = clusterService.operationRouting().searchShardsCount(clusterState, concreteIndices, routingMap);
if (shardCount == 1) {
if (shardIterators.size() == 1) {
// if we only have one group, then we always want Q_A_F, no need for DFS, and no need to do THEN since we hit one shard
searchRequest.searchType(QUERY_AND_FETCH);
}
@ -95,27 +129,37 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
logger.debug("failed to optimize search type, continue as normal", e);
}
searchAsyncAction(searchRequest, listener).start();
searchAsyncAction(searchRequest, shardIterators, startTimeInMillis, clusterState,
Collections.unmodifiableMap(aliasFilter), listener).start();
}
private AbstractSearchAsyncAction searchAsyncAction(SearchRequest searchRequest, ActionListener<SearchResponse> listener) {
private AbstractSearchAsyncAction searchAsyncAction(SearchRequest searchRequest, GroupShardsIterator shardIterators, long startTime,
ClusterState state, Map<String, AliasFilter> aliasFilter,
ActionListener<SearchResponse> listener) {
final Function<String, DiscoveryNode> nodesLookup = state.nodes()::get;
final long clusterStateVersion = state.version();
Executor executor = threadPool.executor(ThreadPool.Names.SEARCH);
AbstractSearchAsyncAction searchAsyncAction;
switch(searchRequest.searchType()) {
case DFS_QUERY_THEN_FETCH:
searchAsyncAction = new SearchDfsQueryThenFetchAsyncAction(logger, searchTransportService, clusterService,
indexNameExpressionResolver, searchPhaseController, threadPool, searchRequest, listener);
searchAsyncAction = new SearchDfsQueryThenFetchAsyncAction(logger, searchTransportService, nodesLookup,
aliasFilter, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime,
clusterStateVersion);
break;
case QUERY_THEN_FETCH:
searchAsyncAction = new SearchQueryThenFetchAsyncAction(logger, searchTransportService, clusterService,
indexNameExpressionResolver, searchPhaseController, threadPool, searchRequest, listener);
searchAsyncAction = new SearchQueryThenFetchAsyncAction(logger, searchTransportService, nodesLookup,
aliasFilter, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime,
clusterStateVersion);
break;
case DFS_QUERY_AND_FETCH:
searchAsyncAction = new SearchDfsQueryAndFetchAsyncAction(logger, searchTransportService, clusterService,
indexNameExpressionResolver, searchPhaseController, threadPool, searchRequest, listener);
searchAsyncAction = new SearchDfsQueryAndFetchAsyncAction(logger, searchTransportService, nodesLookup,
aliasFilter, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime,
clusterStateVersion);
break;
case QUERY_AND_FETCH:
searchAsyncAction = new SearchQueryAndFetchAsyncAction(logger, searchTransportService, clusterService,
indexNameExpressionResolver, searchPhaseController, threadPool, searchRequest, listener);
searchAsyncAction = new SearchQueryAndFetchAsyncAction(logger, searchTransportService, nodesLookup,
aliasFilter, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime,
clusterStateVersion);
break;
default:
throw new IllegalStateException("Unknown search type: [" + searchRequest.searchType() + "]");
@ -123,4 +167,15 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
return searchAsyncAction;
}
private void failIfOverShardCountLimit(ClusterService clusterService, int shardCount) {
final long shardCountLimit = clusterService.getClusterSettings().get(SHARD_COUNT_LIMIT_SETTING);
if (shardCount > shardCountLimit) {
throw new IllegalArgumentException("Trying to query " + shardCount + " shards, which is over the limit of "
+ shardCountLimit + ". This limit exists because querying many shards at the same time can make the "
+ "job of the coordinating node very CPU and/or memory intensive. It is usually a better idea to "
+ "have a smaller number of larger shards. Update [" + SHARD_COUNT_LIMIT_SETTING.getKey()
+ "] to a greater value if you really want to query that many shards at the same time.");
}
}
}
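The limit in the message above is a dynamic cluster setting; assuming its key is action.search.shard_count.limit (the key itself is not shown in this hunk), it can be raised through the cluster settings API, e.g. with a transport client:
client.admin().cluster().prepareUpdateSettings()
    .setTransientSettings(Settings.builder()
        .put("action.search.shard_count.limit", 2000))
    .get();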
View File
@ -32,11 +32,6 @@ import java.util.Base64;
final class TransportSearchHelper {
static ShardSearchTransportRequest internalSearchRequest(ShardRouting shardRouting, int numberOfShards, SearchRequest request,
String[] filteringAliases, long nowInMillis) {
return new ShardSearchTransportRequest(request, shardRouting, numberOfShards, filteringAliases, nowInMillis);
}
static InternalScrollSearchRequest internalScrollSearchRequest(long id, SearchScrollRequest request) {
return new InternalScrollSearchRequest(request, id);
}
View File
@ -44,6 +44,7 @@ import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReferenceArray;
import java.util.function.Supplier;
@ -83,9 +84,9 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest<
protected abstract ShardResponse newShardResponse();
protected abstract ShardResponse shardOperation(ShardRequest request);
protected abstract ShardResponse shardOperation(ShardRequest request) throws IOException;
protected ShardResponse shardOperation(ShardRequest request, Task task) {
protected ShardResponse shardOperation(ShardRequest request, Task task) throws IOException {
return shardOperation(request);
}

View File

@ -46,6 +46,7 @@ import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.function.Supplier;
import static org.elasticsearch.action.support.TransportActions.isShardNotAvailableException;
@ -94,7 +95,7 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ
new AsyncSingleAction(request, listener).start();
}
protected abstract Response shardOperation(Request request, ShardId shardId);
protected abstract Response shardOperation(Request request, ShardId shardId) throws IOException;
protected abstract Response newResponse();

View File

@ -105,13 +105,13 @@ import static org.apache.lucene.util.ArrayUtil.grow;
* <li>vint: frequency (always returned)</li>
* <li>
* <ul>
* <li>vint: position_1 (if positions == true)</li>
* <li>vint: startOffset_1 (if offset == true)</li>
* <li>vint: endOffset_1 (if offset == true)</li>
* <li>BytesRef: payload_1 (if payloads == true)</li>
* <li>vint: position_1 (if positions)</li>
* <li>vint: startOffset_1 (if offset)</li>
* <li>vint: endOffset_1 (if offset)</li>
* <li>BytesRef: payload_1 (if payloads)</li>
* <li>...</li>
* <li>vint: endOffset_frequency (if offset == true)</li>
* <li>BytesRef: payload_frequency (if payloads == true)</li>
* <li>vint: endOffset_frequency (if offset)</li>
* <li>BytesRef: payload_frequency (if payloads)</li>
* </ul></li>
* </ul>
*/

View File

@ -21,7 +21,6 @@ package org.elasticsearch.action.termvectors;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.DocumentRequest;
import org.elasticsearch.action.RealtimeRequest;
import org.elasticsearch.action.ValidateActions;
import org.elasticsearch.action.get.MultiGetRequest;
@ -56,7 +55,7 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
* Note, the {@link #index()}, {@link #type(String)} and {@link #id(String)} are
* required.
*/
public class TermVectorsRequest extends SingleShardRequest<TermVectorsRequest> implements DocumentRequest<TermVectorsRequest>, RealtimeRequest {
public class TermVectorsRequest extends SingleShardRequest<TermVectorsRequest> implements RealtimeRequest {
private String type;
@ -200,7 +199,6 @@ public class TermVectorsRequest extends SingleShardRequest<TermVectorsRequest> i
/**
* Returns the type of document to get the term vector for.
*/
@Override
public String type() {
return type;
}
@ -208,7 +206,6 @@ public class TermVectorsRequest extends SingleShardRequest<TermVectorsRequest> i
/**
* Returns the id of document the term vector is requested for.
*/
@Override
public String id() {
return id;
}
@ -250,18 +247,15 @@ public class TermVectorsRequest extends SingleShardRequest<TermVectorsRequest> i
/**
* @return The routing for this request.
*/
@Override
public String routing() {
return routing;
}
@Override
public TermVectorsRequest routing(String routing) {
this.routing = routing;
return this;
}
@Override
public String parent() {
return parent;
}

View File

@ -20,7 +20,7 @@
package org.elasticsearch.action.update;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.DocumentRequest;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.WriteRequest;
@ -55,7 +55,7 @@ import java.util.Map;
import static org.elasticsearch.action.ValidateActions.addValidationError;
public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
implements DocumentRequest<UpdateRequest>, WriteRequest<UpdateRequest> {
implements DocWriteRequest<UpdateRequest>, WriteRequest<UpdateRequest> {
private static final DeprecationLogger DEPRECATION_LOGGER =
new DeprecationLogger(Loggers.getLogger(UpdateRequest.class));
@ -469,31 +469,33 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
return this.retryOnConflict;
}
/**
* Sets the version, which will cause the index operation to only be performed if a matching
* version exists and no changes happened on the doc since then.
*/
@Override
public UpdateRequest version(long version) {
this.version = version;
return this;
}
@Override
public long version() {
return this.version;
}
/**
* Sets the versioning type. Defaults to {@link VersionType#INTERNAL}.
*/
@Override
public UpdateRequest versionType(VersionType versionType) {
this.versionType = versionType;
return this;
}
@Override
public VersionType versionType() {
return this.versionType;
}
@Override
public OpType opType() {
return OpType.UPDATE;
}
@Override
public UpdateRequest setRefreshPolicy(RefreshPolicy refreshPolicy) {
this.refreshPolicy = refreshPolicy;

View File

@ -308,7 +308,8 @@ final class BootstrapCheck {
static class MaxNumberOfThreadsCheck implements Check {
private final long maxNumberOfThreadsThreshold = 1 << 11;
// this should be plenty for machines up to 256 cores
private final long maxNumberOfThreadsThreshold = 1 << 12;
@Override
public boolean check() {

View File

@ -68,11 +68,6 @@ public class OperationRouting extends AbstractComponent {
return preferenceActiveShardIterator(indexShard, clusterState.nodes().getLocalNodeId(), clusterState.nodes(), preference);
}
public int searchShardsCount(ClusterState clusterState, String[] concreteIndices, @Nullable Map<String, Set<String>> routing) {
final Set<IndexShardRoutingTable> shards = computeTargetedShards(clusterState, concreteIndices, routing);
return shards.size();
}
public GroupShardsIterator searchShards(ClusterState clusterState, String[] concreteIndices, @Nullable Map<String, Set<String>> routing, @Nullable String preference) {
final Set<IndexShardRoutingTable> shards = computeTargetedShards(clusterState, concreteIndices, routing);
final Set<ShardIterator> set = new HashSet<>(shards.size());

View File

@ -20,12 +20,6 @@
package org.elasticsearch.common.bytes;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.io.stream.StreamInput;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
public final class BytesArray extends BytesReference {

View File

@ -67,13 +67,13 @@ import java.util.function.ToLongBiFunction;
*/
public class Cache<K, V> {
// positive if entries have an expiration
private long expireAfterAccess = -1;
private long expireAfterAccessNanos = -1;
// true if entries can expire after access
private boolean entriesExpireAfterAccess;
// positive if entries have an expiration after write
private long expireAfterWrite = -1;
private long expireAfterWriteNanos = -1;
// true if entries can expire after initial insertion
private boolean entriesExpireAfterWrite;
@ -98,22 +98,32 @@ public class Cache<K, V> {
Cache() {
}
void setExpireAfterAccess(long expireAfterAccess) {
if (expireAfterAccess <= 0) {
throw new IllegalArgumentException("expireAfterAccess <= 0");
void setExpireAfterAccessNanos(long expireAfterAccessNanos) {
if (expireAfterAccessNanos <= 0) {
throw new IllegalArgumentException("expireAfterAccessNanos <= 0");
}
this.expireAfterAccess = expireAfterAccess;
this.expireAfterAccessNanos = expireAfterAccessNanos;
this.entriesExpireAfterAccess = true;
}
void setExpireAfterWrite(long expireAfterWrite) {
if (expireAfterWrite <= 0) {
throw new IllegalArgumentException("expireAfterWrite <= 0");
// pkg-private for testing
long getExpireAfterAccessNanos() {
return this.expireAfterAccessNanos;
}
this.expireAfterWrite = expireAfterWrite;
void setExpireAfterWriteNanos(long expireAfterWriteNanos) {
if (expireAfterWriteNanos <= 0) {
throw new IllegalArgumentException("expireAfterWriteNanos <= 0");
}
this.expireAfterWriteNanos = expireAfterWriteNanos;
this.entriesExpireAfterWrite = true;
}
// pkg-private for testing
long getExpireAfterWriteNanos() {
return this.expireAfterWriteNanos;
}
void setMaximumWeight(long maximumWeight) {
if (maximumWeight < 0) {
throw new IllegalArgumentException("maximumWeight < 0");
@ -696,8 +706,8 @@ public class Cache<K, V> {
}
private boolean isExpired(Entry<K, V> entry, long now) {
return (entriesExpireAfterAccess && now - entry.accessTime > expireAfterAccess) ||
(entriesExpireAfterWrite && now - entry.writeTime > expireAfterWrite);
return (entriesExpireAfterAccess && now - entry.accessTime > expireAfterAccessNanos) ||
(entriesExpireAfterWrite && now - entry.writeTime > expireAfterWriteNanos);
}
private boolean unlink(Entry<K, V> entry) {

View File

@ -19,13 +19,15 @@
package org.elasticsearch.common.cache;
import org.elasticsearch.common.unit.TimeValue;
import java.util.Objects;
import java.util.function.ToLongBiFunction;
public class CacheBuilder<K, V> {
private long maximumWeight = -1;
private long expireAfterAccess = -1;
private long expireAfterWrite = -1;
private long expireAfterAccessNanos = -1;
private long expireAfterWriteNanos = -1;
private ToLongBiFunction<K, V> weigher;
private RemovalListener<K, V> removalListener;
@ -44,19 +46,35 @@ public class CacheBuilder<K, V> {
return this;
}
public CacheBuilder<K, V> setExpireAfterAccess(long expireAfterAccess) {
if (expireAfterAccess <= 0) {
/**
* Sets the amount of time before an entry in the cache expires after it was last accessed.
*
* @param expireAfterAccess The amount of time before an entry expires after it was last accessed. Must not be {@code null} and must
* be greater than 0.
*/
public CacheBuilder<K, V> setExpireAfterAccess(TimeValue expireAfterAccess) {
Objects.requireNonNull(expireAfterAccess);
final long expireAfterAccessNanos = expireAfterAccess.getNanos();
if (expireAfterAccessNanos <= 0) {
throw new IllegalArgumentException("expireAfterAccess <= 0");
}
this.expireAfterAccess = expireAfterAccess;
this.expireAfterAccessNanos = expireAfterAccessNanos;
return this;
}
public CacheBuilder<K, V> setExpireAfterWrite(long expireAfterWrite) {
if (expireAfterWrite <= 0) {
/**
* Sets the amount of time before an entry in the cache expires after it was written.
*
* @param expireAfterWrite The amount of time before an entry expires after it was written. Must not be {@code null} and must be
* greater than 0.
*/
public CacheBuilder<K, V> setExpireAfterWrite(TimeValue expireAfterWrite) {
Objects.requireNonNull(expireAfterWrite);
final long expireAfterWriteNanos = expireAfterWrite.getNanos();
if (expireAfterWriteNanos <= 0) {
throw new IllegalArgumentException("expireAfterWrite <= 0");
}
this.expireAfterWrite = expireAfterWrite;
this.expireAfterWriteNanos = expireAfterWriteNanos;
return this;
}
@ -77,11 +95,11 @@ public class CacheBuilder<K, V> {
if (maximumWeight != -1) {
cache.setMaximumWeight(maximumWeight);
}
if (expireAfterAccess != -1) {
cache.setExpireAfterAccess(expireAfterAccess);
if (expireAfterAccessNanos != -1) {
cache.setExpireAfterAccessNanos(expireAfterAccessNanos);
}
if (expireAfterWrite != -1) {
cache.setExpireAfterWrite(expireAfterWrite);
if (expireAfterWriteNanos != -1) {
cache.setExpireAfterWriteNanos(expireAfterWriteNanos);
}
if (weigher != null) {
cache.setWeigher(weigher);
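A minimal usage sketch of the TimeValue-based builder API above; the expiry durations are arbitrary example values:

    import org.elasticsearch.common.cache.Cache;
    import org.elasticsearch.common.cache.CacheBuilder;
    import org.elasticsearch.common.unit.TimeValue;

    // Entries expire 5 minutes after the last access or 1 hour after the write,
    // whichever comes first; null or non-positive TimeValues are rejected eagerly.
    Cache<String, String> cache = CacheBuilder.<String, String>builder()
        .setExpireAfterAccess(TimeValue.timeValueMinutes(5))
        .setExpireAfterWrite(TimeValue.timeValueHours(1))
        .build();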

View File

@ -80,4 +80,11 @@ public class PortsRange {
public interface PortCallback {
boolean onPortNumber(int portNumber);
}
@Override
public String toString() {
return "PortsRange{" +
"portRange='" + portRange + '\'' +
'}';
}
}

View File

@ -397,7 +397,7 @@ public class BigArrays implements Releasable {
void adjustBreaker(long delta) {
if (this.breakerService != null) {
CircuitBreaker breaker = this.breakerService.getBreaker(CircuitBreaker.REQUEST);
if (this.checkBreaker == true) {
if (this.checkBreaker) {
// checking breaker means potentially tripping, but it doesn't
// have to if the delta is negative
if (delta > 0) {

View File

@ -69,7 +69,7 @@ public class PageCacheRecycler extends AbstractComponent implements Releasable {
super(settings);
final Type type = TYPE_SETTING.get(settings);
final long limit = LIMIT_HEAP_SETTING.get(settings).getBytes();
final int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings);
final int availableProcessors = EsExecutors.numberOfProcessors(settings);
// We have a global amount of memory that we need to divide across data types.
// Since some types are more useful than other ones we give them different weights.

View File

@ -40,16 +40,17 @@ public class EsExecutors {
* This is used to adjust thread pools sizes etc. per node.
*/
public static final Setting<Integer> PROCESSORS_SETTING =
Setting.intSetting("processors", Math.min(32, Runtime.getRuntime().availableProcessors()), 1, Property.NodeScope);
Setting.intSetting("processors", Runtime.getRuntime().availableProcessors(), 1, Property.NodeScope);
/**
* Returns the number of processors available but at most <tt>32</tt>.
* Returns the number of available processors. Defaults to
* {@link Runtime#availableProcessors()} but can be overridden by passing a {@link Settings}
* instance with the key "processors" set to the desired value.
*
* @param settings a {@link Settings} instance from which to derive the available processors
* @return the number of available processors
*/
public static int boundedNumberOfProcessors(Settings settings) {
/* This relates to issues where machines with large number of cores
* i.e. >= 48, create too many threads and run into OOM, see #3478.
* We just use a 32 core upper-bound here to not stress the system
* too much with too many created threads */
public static int numberOfProcessors(final Settings settings) {
return PROCESSORS_SETTING.get(settings);
}
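A short sketch of the observable behavior change, assuming only the APIs shown above:

    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.common.util.concurrent.EsExecutors;

    // An explicit "processors" setting still wins ...
    Settings four = Settings.builder().put("processors", 4).build();
    int overridden = EsExecutors.numberOfProcessors(four);          // 4
    // ... but the default is now the real core count, no longer capped at 32.
    int detected = EsExecutors.numberOfProcessors(Settings.EMPTY);  // Runtime.getRuntime().availableProcessors()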

View File

@ -124,7 +124,7 @@ public abstract class AsyncShardFetch<T extends BaseNodeResponse> implements Rel
}
// if we are still fetching, return null to indicate it
if (hasAnyNodeFetching(cache) == true) {
if (hasAnyNodeFetching(cache)) {
return new FetchResult<>(shardId, null, emptySet(), emptySet());
} else {
// nothing to fetch, yay, build the return value
@ -137,7 +137,7 @@ public abstract class AsyncShardFetch<T extends BaseNodeResponse> implements Rel
DiscoveryNode node = nodes.get(nodeId);
if (node != null) {
if (nodeEntry.isFailed() == true) {
if (nodeEntry.isFailed()) {
// if its failed, remove it from the list of nodes, so if this run doesn't work
// we try again next round to fetch it again
it.remove();
@ -361,7 +361,7 @@ public abstract class AsyncShardFetch<T extends BaseNodeResponse> implements Rel
}
void doneFetching(T value) {
assert fetching == true : "setting value but not in fetching mode";
assert fetching : "setting value but not in fetching mode";
assert failure == null : "setting value when failure already set";
this.valueSet = true;
this.value = value;
@ -369,7 +369,7 @@ public abstract class AsyncShardFetch<T extends BaseNodeResponse> implements Rel
}
void doneFetching(Throwable failure) {
assert fetching == true : "setting value but not in fetching mode";
assert fetching : "setting value but not in fetching mode";
assert valueSet == false : "setting failure when already set value";
assert failure != null : "setting failure can't be null";
this.failure = failure;
@ -377,7 +377,7 @@ public abstract class AsyncShardFetch<T extends BaseNodeResponse> implements Rel
}
void restartFetching() {
assert fetching == true : "restarting fetching, but not in fetching mode";
assert fetching : "restarting fetching, but not in fetching mode";
assert valueSet == false : "value can't be set when restarting fetching";
assert failure == null : "failure can't be set when restarting fetching";
this.fetching = false;
@ -388,7 +388,7 @@ public abstract class AsyncShardFetch<T extends BaseNodeResponse> implements Rel
}
boolean hasData() {
return valueSet == true || failure != null;
return valueSet || failure != null;
}
Throwable getFailure() {
@ -399,7 +399,7 @@ public abstract class AsyncShardFetch<T extends BaseNodeResponse> implements Rel
@Nullable
T getValue() {
assert failure == null : "trying to fetch value, but its marked as failed, check isFailed";
assert valueSet == true : "value is not set, hasn't been fetched yet";
assert valueSet : "value is not set, hasn't been fetched yet";
return value;
}
}

View File

@ -153,7 +153,7 @@ public class DanglingIndicesState extends AbstractComponent {
* for allocation.
*/
private void allocateDanglingIndices() {
if (danglingIndices.isEmpty() == true) {
if (danglingIndices.isEmpty()) {
return;
}
try {

View File

@ -85,7 +85,7 @@ public class GatewayAllocator extends AbstractComponent {
boolean cleanCache = false;
DiscoveryNode localNode = event.state().nodes().getLocalNode();
if (localNode != null) {
if (localNode.isMasterNode() == true && event.localNodeMaster() == false) {
if (localNode.isMasterNode() && event.localNodeMaster() == false) {
cleanCache = true;
}
} else {
@ -174,7 +174,7 @@ public class GatewayAllocator extends AbstractComponent {
AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> shardState =
fetch.fetchData(allocation.nodes(), allocation.getIgnoreNodes(shard.shardId()));
if (shardState.hasData() == true) {
if (shardState.hasData()) {
shardState.processAllocation(allocation);
}
return shardState;
@ -199,7 +199,7 @@ public class GatewayAllocator extends AbstractComponent {
}
AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> shardStores =
fetch.fetchData(allocation.nodes(), allocation.getIgnoreNodes(shard.shardId()));
if (shardStores.hasData() == true) {
if (shardStores.hasData()) {
shardStores.processAllocation(allocation);
}
return shardStores;

View File

@ -192,7 +192,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
Set<Index> relevantIndices;
if (isDataOnlyNode(state)) {
relevantIndices = getRelevantIndicesOnDataOnlyNode(state, previousState, previouslyWrittenIndices);
} else if (state.nodes().getLocalNode().isMasterNode() == true) {
} else if (state.nodes().getLocalNode().isMasterNode()) {
relevantIndices = getRelevantIndicesForMasterEligibleNode(state);
} else {
relevantIndices = Collections.emptySet();

View File

@ -195,7 +195,7 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator {
"the allocation deciders returned a YES decision to allocate to node [" + nodeId + "]",
decidedNode.nodeShardState.allocationId(),
buildNodeDecisions(nodesToAllocate, explain));
} else if (nodesToAllocate.throttleNodeShards.isEmpty() == true && nodesToAllocate.noNodeShards.isEmpty() == false) {
} else if (nodesToAllocate.throttleNodeShards.isEmpty() && !nodesToAllocate.noNodeShards.isEmpty()) {
// The deciders returned a NO decision for all nodes with shard copies, so we check if primary shard
// can be force-allocated to one of the nodes.
final NodesToAllocate nodesToForceAllocate = buildNodesToAllocate(

View File

@ -65,7 +65,7 @@ public abstract class ReplicaShardAllocator extends BaseGatewayShardAllocator {
List<Runnable> shardCancellationActions = new ArrayList<>();
for (RoutingNode routingNode : routingNodes) {
for (ShardRouting shard : routingNode) {
if (shard.primary() == true) {
if (shard.primary()) {
continue;
}
if (shard.initializing() == false) {
@ -109,7 +109,7 @@ public abstract class ReplicaShardAllocator extends BaseGatewayShardAllocator {
}
if (currentNode.equals(nodeWithHighestMatch) == false
&& Objects.equals(currentSyncId, primaryStore.syncId()) == false
&& matchingNodes.isNodeMatchBySyncID(nodeWithHighestMatch) == true) {
&& matchingNodes.isNodeMatchBySyncID(nodeWithHighestMatch)) {
// we found a better match that has a full sync id match, the existing allocation is not fully synced
// so we found a better one, cancel this one
logger.debug("cancelling allocation of replica on [{}], sync id match found on node [{}]",

View File

@ -22,25 +22,18 @@ package org.elasticsearch.index;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.AliasMetaData;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.env.ShardLock;
import org.elasticsearch.env.ShardLockObtainFailedException;
@ -55,7 +48,6 @@ import org.elasticsearch.index.engine.EngineFactory;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.shard.IndexEventListener;
import org.elasticsearch.index.shard.IndexSearcherWrapper;
@ -70,8 +62,6 @@ import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.indices.AliasFilterParsingException;
import org.elasticsearch.indices.InvalidAliasNameException;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.indices.cluster.IndicesClusterStateService;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
@ -89,7 +79,6 @@ import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
@ -476,7 +465,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
* used for rewriting since it does not know about the current {@link IndexReader}.
*/
public QueryShardContext newQueryShardContext() {
return newQueryShardContext(0, null, threadPool::estimatedTimeInMillis);
return newQueryShardContext(0, null, System::currentTimeMillis);
}
/**
@ -598,64 +587,6 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
}
}
/**
* Returns the filter associated with listed filtering aliases.
* <p>
* The list of filtering aliases should be obtained by calling MetaData.filteringAliases.
* Returns <tt>null</tt> if no filtering is required.</p>
*/
public Query aliasFilter(QueryShardContext context, String... aliasNames) {
if (aliasNames == null || aliasNames.length == 0) {
return null;
}
final ImmutableOpenMap<String, AliasMetaData> aliases = indexSettings.getIndexMetaData().getAliases();
if (aliasNames.length == 1) {
AliasMetaData alias = aliases.get(aliasNames[0]);
if (alias == null) {
// This shouldn't happen unless alias disappeared after filteringAliases was called.
throw new InvalidAliasNameException(index(), aliasNames[0], "Unknown alias name was passed to alias Filter");
}
return parse(alias, context);
} else {
// we may need to benchmark here a bit to see whether it makes sense to use an OrFilter
BooleanQuery.Builder combined = new BooleanQuery.Builder();
for (String aliasName : aliasNames) {
AliasMetaData alias = aliases.get(aliasName);
if (alias == null) {
// This shouldn't happen unless alias disappeared after filteringAliases was called.
throw new InvalidAliasNameException(indexSettings.getIndex(), aliasNames[0],
"Unknown alias name was passed to alias Filter");
}
Query parsedFilter = parse(alias, context);
if (parsedFilter != null) {
combined.add(parsedFilter, BooleanClause.Occur.SHOULD);
} else {
// The filter might be null only if filter was removed after filteringAliases was called
return null;
}
}
return combined.build();
}
}
private Query parse(AliasMetaData alias, QueryShardContext shardContext) {
if (alias.filter() == null) {
return null;
}
try {
byte[] filterSource = alias.filter().uncompressed();
try (XContentParser parser = XContentFactory.xContent(filterSource).createParser(filterSource)) {
Optional<QueryBuilder> innerQueryBuilder = shardContext.newParseContext(parser).parseInnerQueryBuilder();
if (innerQueryBuilder.isPresent()) {
return shardContext.toFilter(innerQueryBuilder.get()).query();
}
return null;
}
} catch (IOException ex) {
throw new AliasFilterParsingException(shardContext.index(), alias.getAlias(), "Invalid alias filter", ex);
}
}
public IndexMetaData getMetaData() {
return indexSettings.getIndexMetaData();
}

View File

@ -54,7 +54,7 @@ public final class MergeSchedulerConfig {
public static final Setting<Integer> MAX_THREAD_COUNT_SETTING =
new Setting<>("index.merge.scheduler.max_thread_count",
(s) -> Integer.toString(Math.max(1, Math.min(4, EsExecutors.boundedNumberOfProcessors(s) / 2))),
(s) -> Integer.toString(Math.max(1, Math.min(4, EsExecutors.numberOfProcessors(s) / 2))),
(s) -> Setting.parseInt(s, 1, "index.merge.scheduler.max_thread_count"), Property.Dynamic,
Property.IndexScope);
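Working the default formula max(1, min(4, numberOfProcessors / 2)) through a few example core counts:

    // 2 processors  -> max(1, min(4, 1))  = 1 merge thread
    // 8 processors  -> max(1, min(4, 4))  = 4 merge threads
    // 64 processors -> max(1, min(4, 32)) = 4 merge threads (still capped at 4)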
public static final Setting<Integer> MAX_MERGE_COUNT_SETTING =

View File

@ -68,7 +68,7 @@ public class GeoPointArrayIndexFieldData extends AbstractIndexGeoPointFieldData
estimator.afterLoad(null, data.ramBytesUsed());
return data;
}
return (indexSettings.getIndexVersionCreated().before(Version.V_2_2_0) == true) ?
return (indexSettings.getIndexVersionCreated().before(Version.V_2_2_0)) ?
loadLegacyFieldData(reader, estimator, terms, data) : loadFieldData22(reader, estimator, terms, data);
}

View File

@ -297,7 +297,7 @@ public class LegacyGeoPointFieldMapper extends BaseGeoPointFieldMapper implement
validPoint = true;
}
if (coerce.value() == true && validPoint == false) {
if (coerce.value() && validPoint == false) {
// by setting coerce to false we are assuming all geopoints are already in a valid coordinate system
// thus this extra step can be skipped
GeoUtils.normalizePoint(point, true, true);

View File

@ -39,6 +39,7 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {
private final ContentPath contentPath;
public BuilderContext(Settings indexSettings, ContentPath contentPath) {
Objects.requireNonNull(indexSettings, "indexSettings is required");
this.contentPath = contentPath;
this.indexSettings = indexSettings;
}
@ -47,16 +48,11 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {
return this.contentPath;
}
@Nullable
public Settings indexSettings() {
return this.indexSettings;
}
@Nullable
public Version indexCreatedVersion() {
if (indexSettings == null) {
return null;
}
return Version.indexCreated(indexSettings);
}
}

View File

@ -21,12 +21,10 @@ package org.elasticsearch.index.mapper;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.elasticsearch.action.DocumentRequest;
import org.elasticsearch.common.lucene.BytesRefs;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
public final class Uid {

View File

@ -299,7 +299,7 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder<GeoBounding
QueryValidationException checkLatLon(boolean indexCreatedBeforeV2_0) {
// validation was not available prior to 2.x, so to support bwc percolation queries we only ignore_malformed on 2.x created indexes
if (GeoValidationMethod.isIgnoreMalformed(validationMethod) == true || indexCreatedBeforeV2_0) {
if (GeoValidationMethod.isIgnoreMalformed(validationMethod) || indexCreatedBeforeV2_0) {
return null;
}

View File

@ -408,7 +408,7 @@ public class GeoDistanceQueryBuilder extends AbstractQueryBuilder<GeoDistanceQue
optimizeBbox = parser.textOrNull();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, COERCE_FIELD)) {
coerce = parser.booleanValue();
if (coerce == true) {
if (coerce) {
ignoreMalformed = true;
}
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, IGNORE_MALFORMED_FIELD)) {

View File

@ -327,7 +327,7 @@ public class GeoDistanceRangeQueryBuilder extends AbstractQueryBuilder<GeoDistan
} else {
fromValue = DistanceUnit.parse((String) from, unit, DistanceUnit.DEFAULT);
}
if (indexCreatedBeforeV2_2 == true) {
if (indexCreatedBeforeV2_2) {
fromValue = geoDistance.normalize(fromValue, DistanceUnit.DEFAULT);
}
} else {
@ -340,7 +340,7 @@ public class GeoDistanceRangeQueryBuilder extends AbstractQueryBuilder<GeoDistan
} else {
toValue = DistanceUnit.parse((String) to, unit, DistanceUnit.DEFAULT);
}
if (indexCreatedBeforeV2_2 == true) {
if (indexCreatedBeforeV2_2) {
toValue = geoDistance.normalize(toValue, DistanceUnit.DEFAULT);
}
} else {

View File

@ -227,7 +227,7 @@ public class MultiMatchQuery extends MatchQuery {
if (blendedFields == null) {
return super.blendTerm(term, fieldType);
}
return MultiMatchQuery.blendTerm(term.bytes(), commonTermsCutoff, tieBreaker, blendedFields);
return MultiMatchQuery.blendTerm(context, term.bytes(), commonTermsCutoff, tieBreaker, blendedFields);
}
@Override
@ -241,7 +241,8 @@ public class MultiMatchQuery extends MatchQuery {
}
}
static Query blendTerm(BytesRef value, Float commonTermsCutoff, float tieBreaker, FieldAndFieldType... blendedFields) {
static Query blendTerm(QueryShardContext context, BytesRef value, Float commonTermsCutoff, float tieBreaker,
FieldAndFieldType... blendedFields) {
List<Query> queries = new ArrayList<>();
Term[] terms = new Term[blendedFields.length];
float[] blendedBoost = new float[blendedFields.length];
@ -249,7 +250,7 @@ public class MultiMatchQuery extends MatchQuery {
for (FieldAndFieldType ft : blendedFields) {
Query query;
try {
query = ft.fieldType.termQuery(value, null);
query = ft.fieldType.termQuery(value, context);
} catch (IllegalArgumentException e) {
// the query expects a certain class of values such as numbers
// of ip addresses and the value can't be parsed, so ignore this

View File

@ -65,7 +65,7 @@ public final class ShadowIndexShard extends IndexShard {
*/
@Override
public void updateRoutingEntry(ShardRouting newRouting) throws IOException {
if (newRouting.primary() == true) {// becoming a primary
if (newRouting.primary()) {// becoming a primary
throw new IllegalStateException("can't promote shard to primary");
}
super.updateRoutingEntry(newRouting);

View File

@ -47,7 +47,6 @@ import java.util.Collections;
import java.util.Iterator;
import java.util.Set;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;
/**
* The indices request cache allows caching of shard-level request stage responses, helping with improving
@ -90,7 +89,7 @@ public final class IndicesRequestCache extends AbstractComponent implements Remo
CacheBuilder<Key, Value> cacheBuilder = CacheBuilder.<Key, Value>builder()
.setMaximumWeight(sizeInBytes).weigher((k, v) -> k.ramBytesUsed() + v.ramBytesUsed()).removalListener(this);
if (expire != null) {
cacheBuilder.setExpireAfterAccess(TimeUnit.MILLISECONDS.toNanos(expire.millis()));
cacheBuilder.setExpireAfterAccess(expire);
}
cache = cacheBuilder.build();
}

View File

@ -45,6 +45,7 @@ import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.breaker.CircuitBreaker;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
@ -65,6 +66,7 @@ import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.Callback;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.iterable.Iterables;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.env.ShardLock;
import org.elasticsearch.env.ShardLockObtainFailedException;
@ -84,6 +86,7 @@ import org.elasticsearch.index.get.GetStats;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.index.recovery.RecoveryStats;
import org.elasticsearch.index.refresh.RefreshStats;
import org.elasticsearch.index.search.stats.SearchStats;
@ -106,6 +109,7 @@ import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.repositories.RepositoriesService;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.ShardSearchRequest;
import org.elasticsearch.search.query.QueryPhase;
@ -128,6 +132,7 @@ import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collectors;
@ -1207,4 +1212,12 @@ public class IndicesService extends AbstractLifecycleComponent
(Index index, IndexSettings indexSettings) -> canDeleteIndexContents(index, indexSettings);
private final IndexDeletionAllowedPredicate ALWAYS_TRUE = (Index index, IndexSettings indexSettings) -> true;
public AliasFilter buildAliasFilter(ClusterState state, String index, String... expressions) {
Function<XContentParser, QueryParseContext> factory =
(parser) -> new QueryParseContext(indicesQueriesRegistry, parser, new ParseFieldMatcher(settings));
String[] aliases = indexNameExpressionResolver.filteringAliases(state, index, expressions);
IndexMetaData indexMetaData = state.metaData().index(index);
return new AliasFilter(ShardSearchRequest.parseAliasFilter(factory, indexMetaData, aliases), aliases);
}
}
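A hypothetical call site for the new buildAliasFilter; the index and alias names below are illustrative, not taken from this commit:

    AliasFilter aliasFilter = indicesService.buildAliasFilter(clusterState, "logs-2016.10", "recent_errors");
    String[] aliases = aliasFilter.getAliases();           // {"recent_errors"}
    QueryBuilder filter = aliasFilter.getQueryBuilder();   // the parsed alias filter, or null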

View File

@ -19,7 +19,7 @@
package org.elasticsearch.ingest;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterStateListener;
@ -68,7 +68,7 @@ public class PipelineExecutionService implements ClusterStateListener {
});
}
public void executeBulkRequest(Iterable<ActionRequest<?>> actionRequests,
public void executeBulkRequest(Iterable<DocWriteRequest> actionRequests,
BiConsumer<IndexRequest, Exception> itemFailureHandler,
Consumer<Exception> completionHandler) {
threadPool.executor(ThreadPool.Names.BULK).execute(new AbstractRunnable() {
@ -80,7 +80,7 @@ public class PipelineExecutionService implements ClusterStateListener {
@Override
protected void doRun() throws Exception {
for (ActionRequest actionRequest : actionRequests) {
for (DocWriteRequest actionRequest : actionRequests) {
if ((actionRequest instanceof IndexRequest)) {
IndexRequest indexRequest = (IndexRequest) actionRequest;
if (Strings.hasText(indexRequest.getPipeline())) {

View File

@ -41,7 +41,7 @@ public class OsService extends AbstractComponent {
super(settings);
this.probe = OsProbe.getInstance();
TimeValue refreshInterval = REFRESH_INTERVAL_SETTING.get(settings);
this.info = probe.osInfo(refreshInterval.millis(), EsExecutors.boundedNumberOfProcessors(settings));
this.info = probe.osInfo(refreshInterval.millis(), EsExecutors.numberOfProcessors(settings));
this.osStatsCache = new OsStatsCache(refreshInterval, probe.osStats());
logger.debug("using refresh_interval [{}]", refreshInterval);
}

View File

@ -82,7 +82,7 @@ public class RestIndexAction extends BaseRestHandler {
indexRequest.waitForActiveShards(ActiveShardCount.parseString(waitForActiveShards));
}
if (sOpType != null) {
indexRequest.opType(IndexRequest.OpType.fromString(sOpType));
indexRequest.opType(sOpType);
}
return channel ->

View File

@ -136,7 +136,7 @@ public class ScriptService extends AbstractComponent implements Closeable, Clust
TimeValue cacheExpire = SCRIPT_CACHE_EXPIRE_SETTING.get(settings);
if (cacheExpire.getNanos() != 0) {
cacheBuilder.setExpireAfterAccess(cacheExpire.nanos());
cacheBuilder.setExpireAfterAccess(cacheExpire);
}
logger.debug("using script cache with max_size [{}], expire [{}]", cacheMaxSize, cacheExpire);

View File

@ -48,6 +48,7 @@ import org.elasticsearch.index.mapper.ObjectMapper;
import org.elasticsearch.index.mapper.TypeFieldMapper;
import org.elasticsearch.index.query.AbstractQueryBuilder;
import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.similarity.SimilarityService;
@ -74,6 +75,7 @@ import org.elasticsearch.search.sort.SortAndFormats;
import org.elasticsearch.search.suggest.SuggestionSearchContext;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
@ -227,7 +229,12 @@ final class DefaultSearchContext extends SearchContext {
}
// initialize the filtering alias based on the provided filters
aliasFilter = indexService.aliasFilter(queryShardContext, request.filteringAliases());
try {
final QueryBuilder queryBuilder = request.filteringAliases();
aliasFilter = queryBuilder == null ? null : queryBuilder.toFilter(queryShardContext);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
if (query() == null) {
parsedQuery(ParsedQuery.parsedMatchAllQuery());

View File

@ -22,8 +22,10 @@ package org.elasticsearch.search;
import com.carrotsearch.hppc.ObjectFloatHashMap;
import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
@ -64,6 +66,7 @@ import org.elasticsearch.search.fetch.ShardFetchRequest;
import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext;
import org.elasticsearch.search.fetch.subphase.ScriptFieldsContext.ScriptField;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.search.internal.InternalScrollSearchRequest;
import org.elasticsearch.search.internal.ScrollContext;
import org.elasticsearch.search.internal.SearchContext;
@ -518,11 +521,6 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
final SearchContext createContext(ShardSearchRequest request, @Nullable Engine.Searcher searcher) throws IOException {
final DefaultSearchContext context = createSearchContext(request, defaultSearchTimeout, searcher);
try {
// we clone the query shard context here just for rewriting otherwise we
// might end up with incorrect state since we are using now() or script services
// during rewrite and normalized / evaluate templates etc.
request.rewrite(new QueryShardContext(context.getQueryShardContext()));
assert context.getQueryShardContext().isCachable();
if (request.scroll() != null) {
context.scrollContext(new ScrollContext());
context.scrollContext().scroll = request.scroll();
@ -556,16 +554,30 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
return context;
}
public DefaultSearchContext createSearchContext(ShardSearchRequest request, TimeValue timeout, @Nullable Engine.Searcher searcher) {
public DefaultSearchContext createSearchContext(ShardSearchRequest request, TimeValue timeout, @Nullable Engine.Searcher searcher)
throws IOException {
IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
IndexShard indexShard = indexService.getShard(request.shardId().getId());
SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().getId(), indexShard.shardId());
Engine.Searcher engineSearcher = searcher == null ? indexShard.acquireSearcher("search") : searcher;
return new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, engineSearcher,
indexService,
indexShard, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher,
final DefaultSearchContext searchContext = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget,
engineSearcher, indexService, indexShard, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher,
timeout, fetchPhase);
boolean success = false;
try {
// we clone the query shard context here just for rewriting otherwise we
// might end up with incorrect state since we are using now() or script services
// during rewrite and normalized / evaluate templates etc.
request.rewrite(new QueryShardContext(searchContext.getQueryShardContext()));
assert searchContext.getQueryShardContext().isCachable();
success = true;
} finally {
if (success == false) {
IOUtils.closeWhileHandlingException(searchContext);
}
}
return searchContext;
}
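The success-flag pattern used above, reduced to a generic sketch; riskyInit and resource are placeholders, not names from this commit:

    import java.io.Closeable;
    import java.io.IOException;
    import org.apache.lucene.util.IOUtils;

    static void riskyInit(Closeable resource) throws IOException {
        // hypothetical initialization step that may throw
    }

    static void initOrRelease(Closeable resource) throws IOException {
        boolean success = false;
        try {
            riskyInit(resource);
            success = true;
        } finally {
            if (success == false) {
                // release on failure without masking the original exception
                IOUtils.closeWhileHandlingException(resource);
            }
        }
    }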
private void freeAllContextForIndex(Index index) {
@ -859,4 +871,8 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
}
}
}
public AliasFilter buildAliasFilter(ClusterState state, String index, String... expressions) {
return indicesService.buildAliasFilter(state, index, expressions);
}
}

View File

@ -0,0 +1,121 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.internal;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryRewriteContext;
import java.io.IOException;
import java.util.Arrays;
import java.util.Objects;
/**
* Represents a {@link QueryBuilder} and the list of alias names the filter was composed from.
*/
public final class AliasFilter implements Writeable {
public static final Version V_5_1_0 = Version.fromId(5010099);
private final String[] aliases;
private final QueryBuilder filter;
private final boolean reparseAliases;
public AliasFilter(QueryBuilder filter, String... aliases) {
this.aliases = aliases == null ? Strings.EMPTY_ARRAY : aliases;
this.filter = filter;
reparseAliases = false; // no bwc here - we only do this if we parse the filter
}
public AliasFilter(StreamInput input) throws IOException {
aliases = input.readStringArray();
if (input.getVersion().onOrAfter(V_5_1_0)) {
filter = input.readOptionalNamedWriteable(QueryBuilder.class);
reparseAliases = false;
} else {
reparseAliases = true; // alright we read from 5.0
filter = null;
}
}
private QueryBuilder reparseFilter(QueryRewriteContext context) {
if (reparseAliases) {
// we are processing a filter received from a 5.0 node - we need to reparse this on the executing node
final IndexMetaData indexMetaData = context.getIndexSettings().getIndexMetaData();
return ShardSearchRequest.parseAliasFilter(context::newParseContext, indexMetaData, aliases);
}
return filter;
}
AliasFilter rewrite(QueryRewriteContext context) throws IOException {
QueryBuilder queryBuilder = reparseFilter(context);
if (queryBuilder != null) {
return new AliasFilter(QueryBuilder.rewriteQuery(queryBuilder, context), aliases);
}
return new AliasFilter(filter, aliases);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeStringArray(aliases);
if (out.getVersion().onOrAfter(V_5_1_0)) {
out.writeOptionalNamedWriteable(filter);
}
}
/**
* Returns the alias patterns that are used to compose the {@link QueryBuilder}
* returned from {@link #getQueryBuilder()}
*/
public String[] getAliases() {
return aliases;
}
/**
* Returns the alias filter {@link QueryBuilder} or <code>null</code> if there is no such filter
*/
public QueryBuilder getQueryBuilder() {
if (reparseAliases) {
// this is only for BWC: since 5.0 still only sends aliases, the filter must be rewritten on the executing node.
// If we talk to an older node we also only forward/write the string array, which is compatible with the consumers
// in 5.0; see ExplainRequest and QueryValidationRequest
throw new IllegalStateException("alias filter for aliases: " + Arrays.toString(aliases) + " must be rewritten first");
}
return filter;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
AliasFilter that = (AliasFilter) o;
return reparseAliases == that.reparseAliases &&
Arrays.equals(aliases, that.aliases) &&
Objects.equals(filter, that.filter);
}
@Override
public int hashCode() {
return Objects.hash(aliases, filter, reparseAliases);
}
}
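A serialization sketch of the BWC behavior above; the pre-5.1.0 version id passed to Version.fromId is assumed, for illustration only:

    import org.elasticsearch.Version;
    import org.elasticsearch.common.io.stream.BytesStreamOutput;
    import org.elasticsearch.index.query.QueryBuilders;

    AliasFilter aliasFilter = new AliasFilter(QueryBuilders.termQuery("user", "kimchy"), "my_alias");

    // 5.1.0 or later: alias names and the parsed filter travel together.
    BytesStreamOutput out = new BytesStreamOutput();
    out.setVersion(AliasFilter.V_5_1_0);
    aliasFilter.writeTo(out);

    // Before 5.1.0: only the alias names are written; the receiving node
    // reads with reparseAliases == true and reparses the filter locally.
    BytesStreamOutput oldOut = new BytesStreamOutput();
    oldOut.setVersion(Version.fromId(5000099)); // assumed pre-5.1.0 id
    aliasFilter.writeTo(oldOut);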

View File

@ -28,6 +28,7 @@ import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.search.Scroll;
@ -62,7 +63,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest {
private SearchType searchType;
private Scroll scroll;
private String[] types = Strings.EMPTY_ARRAY;
private String[] filteringAliases;
private AliasFilter aliasFilter;
private SearchSourceBuilder source;
private Boolean requestCache;
private long nowInMillis;
@ -73,29 +74,29 @@ public class ShardSearchLocalRequest implements ShardSearchRequest {
}
ShardSearchLocalRequest(SearchRequest searchRequest, ShardRouting shardRouting, int numberOfShards,
String[] filteringAliases, long nowInMillis) {
AliasFilter aliasFilter, long nowInMillis) {
this(shardRouting.shardId(), numberOfShards, searchRequest.searchType(),
searchRequest.source(), searchRequest.types(), searchRequest.requestCache());
searchRequest.source(), searchRequest.types(), searchRequest.requestCache(), aliasFilter);
this.scroll = searchRequest.scroll();
this.filteringAliases = filteringAliases;
this.nowInMillis = nowInMillis;
}
public ShardSearchLocalRequest(ShardId shardId, String[] types, long nowInMillis, String[] filteringAliases) {
public ShardSearchLocalRequest(ShardId shardId, String[] types, long nowInMillis, AliasFilter aliasFilter) {
this.types = types;
this.nowInMillis = nowInMillis;
this.filteringAliases = filteringAliases;
this.aliasFilter = aliasFilter;
this.shardId = shardId;
}
public ShardSearchLocalRequest(ShardId shardId, int numberOfShards, SearchType searchType, SearchSourceBuilder source, String[] types,
Boolean requestCache) {
Boolean requestCache, AliasFilter aliasFilter) {
this.shardId = shardId;
this.numberOfShards = numberOfShards;
this.searchType = searchType;
this.source = source;
this.types = types;
this.requestCache = requestCache;
this.aliasFilter = aliasFilter;
}
@ -130,8 +131,8 @@ public class ShardSearchLocalRequest implements ShardSearchRequest {
}
@Override
public String[] filteringAliases() {
return filteringAliases;
public QueryBuilder filteringAliases() {
return aliasFilter.getQueryBuilder();
}
@Override
@ -166,7 +167,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest {
scroll = in.readOptionalWriteable(Scroll::new);
source = in.readOptionalWriteable(SearchSourceBuilder::new);
types = in.readStringArray();
filteringAliases = in.readStringArray();
aliasFilter = new AliasFilter(in);
nowInMillis = in.readVLong();
requestCache = in.readOptionalBoolean();
}
@ -180,7 +181,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest {
out.writeOptionalWriteable(scroll);
out.writeOptionalWriteable(source);
out.writeStringArray(types);
out.writeStringArrayNullable(filteringAliases);
aliasFilter.writeTo(out);
if (!asKey) {
out.writeVLong(nowInMillis);
}
@ -200,6 +201,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest {
public void rewrite(QueryShardContext context) throws IOException {
SearchSourceBuilder source = this.source;
SearchSourceBuilder rewritten = null;
aliasFilter = aliasFilter.rewrite(context);
while (rewritten != source) {
rewritten = source.rewrite(context);
source = rewritten;

View File

@ -20,13 +20,26 @@
package org.elasticsearch.search.internal;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.cluster.metadata.AliasMetaData;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.AliasFilterParsingException;
import org.elasticsearch.indices.InvalidAliasNameException;
import org.elasticsearch.search.Scroll;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import java.io.IOException;
import java.util.Optional;
import java.util.function.Function;
/**
* Shard level request that represents a search.
@ -47,7 +60,7 @@ public interface ShardSearchRequest {
SearchType searchType();
String[] filteringAliases();
QueryBuilder filteringAliases();
long nowInMillis();
@ -76,4 +89,64 @@ public interface ShardSearchRequest {
* QueryBuilder.
*/
void rewrite(QueryShardContext context) throws IOException;
/**
* Returns the filter associated with listed filtering aliases.
* <p>
* The list of filtering aliases should be obtained by calling MetaData.filteringAliases.
* Returns <tt>null</tt> if no filtering is required.</p>
*/
static QueryBuilder parseAliasFilter(Function<XContentParser, QueryParseContext> contextFactory,
IndexMetaData metaData, String... aliasNames) {
if (aliasNames == null || aliasNames.length == 0) {
return null;
}
Index index = metaData.getIndex();
ImmutableOpenMap<String, AliasMetaData> aliases = metaData.getAliases();
Function<AliasMetaData, QueryBuilder> parserFunction = (alias) -> {
if (alias.filter() == null) {
return null;
}
try {
byte[] filterSource = alias.filter().uncompressed();
try (XContentParser parser = XContentFactory.xContent(filterSource).createParser(filterSource)) {
Optional<QueryBuilder> innerQueryBuilder = contextFactory.apply(parser).parseInnerQueryBuilder();
if (innerQueryBuilder.isPresent()) {
return innerQueryBuilder.get();
}
return null;
}
} catch (IOException ex) {
throw new AliasFilterParsingException(index, alias.getAlias(), "Invalid alias filter", ex);
}
};
if (aliasNames.length == 1) {
AliasMetaData alias = aliases.get(aliasNames[0]);
if (alias == null) {
// This shouldn't happen unless alias disappeared after filteringAliases was called.
throw new InvalidAliasNameException(index, aliasNames[0], "Unknown alias name was passed to alias Filter");
}
return parserFunction.apply(alias);
} else {
// we may need to benchmark here a bit to see whether it makes sense to use an OrFilter
BoolQueryBuilder combined = new BoolQueryBuilder();
for (String aliasName : aliasNames) {
AliasMetaData alias = aliases.get(aliasName);
if (alias == null) {
// This shouldn't happen unless alias disappeared after filteringAliases was called.
throw new InvalidAliasNameException(index, aliasNames[0],
"Unknown alias name was passed to alias Filter");
}
QueryBuilder parsedFilter = parserFunction.apply(alias);
if (parsedFilter != null) {
combined.should(parsedFilter);
} else {
// The filter might be null only if filter was removed after filteringAliases was called
return null;
}
}
return combined;
}
}
}
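A hypothetical invocation of parseAliasFilter; queriesRegistry and indexMetaData are assumed to be in scope:

    Function<XContentParser, QueryParseContext> factory =
        (parser) -> new QueryParseContext(queriesRegistry, parser, ParseFieldMatcher.STRICT);
    QueryBuilder combined = ShardSearchRequest.parseAliasFilter(factory, indexMetaData, "alias1", "alias2");
    // one filtered alias returns its filter directly; several become a BoolQueryBuilder
    // with one should clause per alias; no aliases (or a removed filter) returns null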

View File

@ -28,6 +28,7 @@ import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.search.Scroll;
@ -51,8 +52,8 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha
}
public ShardSearchTransportRequest(SearchRequest searchRequest, ShardRouting shardRouting, int numberOfShards,
String[] filteringAliases, long nowInMillis) {
this.shardSearchLocalRequest = new ShardSearchLocalRequest(searchRequest, shardRouting, numberOfShards, filteringAliases, nowInMillis);
AliasFilter aliasFilter, long nowInMillis) {
this.shardSearchLocalRequest = new ShardSearchLocalRequest(searchRequest, shardRouting, numberOfShards, aliasFilter, nowInMillis);
this.originalIndices = new OriginalIndices(searchRequest);
}
@ -104,7 +105,7 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha
}
@Override
public String[] filteringAliases() {
public QueryBuilder filteringAliases() {
return shardSearchLocalRequest.filteringAliases();
}

View File

@ -452,7 +452,7 @@ public class GeoDistanceSortBuilder extends SortBuilder<GeoDistanceSortBuilder>
geoDistance = GeoDistance.fromString(parser.text());
} else if (parseFieldMatcher.match(currentName, COERCE_FIELD)) {
coerce = parser.booleanValue();
if (coerce == true) {
if (coerce) {
ignoreMalformed = true;
}
} else if (parseFieldMatcher.match(currentName, IGNORE_MALFORMED_FIELD)) {

View File

@ -23,7 +23,6 @@ import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.SizeValue;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.node.Node;
@ -79,7 +78,7 @@ public final class FixedExecutorBuilder extends ExecutorBuilder<FixedExecutorBui
private int applyHardSizeLimit(final Settings settings, final String name) {
if (name.equals(ThreadPool.Names.BULK) || name.equals(ThreadPool.Names.INDEX)) {
return 1 + EsExecutors.boundedNumberOfProcessors(settings);
return 1 + EsExecutors.numberOfProcessors(settings);
} else {
return Integer.MAX_VALUE;
}

View File

@ -163,7 +163,7 @@ public class ThreadPool extends AbstractComponent implements Closeable {
assert Node.NODE_NAME_SETTING.exists(settings);
final Map<String, ExecutorBuilder> builders = new HashMap<>();
final int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings);
final int availableProcessors = EsExecutors.numberOfProcessors(settings);
final int halfProcMaxAt5 = halfNumberOfProcessorsMaxFive(availableProcessors);
final int halfProcMaxAt10 = halfNumberOfProcessorsMaxTen(availableProcessors);
final int genericThreadPoolMax = boundedBy(4 * availableProcessors, 128, 512);
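To make the sizing arithmetic concrete, here is a sketch under the assumption that boundedBy(value, min, max) clamps value into [min, max], which matches its use above:
// Hypothetical clamp matching the call boundedBy(4 * availableProcessors, 128, 512).
static int boundedBy(int value, int min, int max) {
    return Math.min(max, Math.max(min, value));
}
// On an 8-processor node: boundedBy(4 * 8, 128, 512) == 128, i.e. the generic pool caps at 128 threads.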

View File

@ -19,9 +19,11 @@
package org.elasticsearch;
import org.elasticsearch.action.ShardValidateQueryRequestTests;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.VersionUtils;
import org.hamcrest.Matchers;
@ -279,4 +281,19 @@ public class VersionTests extends ESTestCase {
}
}
}
private static final Version V_20_0_0_UNRELEASED = new Version(20000099, Version.CURRENT.luceneVersion);
// see comment in Version.java about this test
public void testUnknownVersions() {
assertUnknownVersion(V_20_0_0_UNRELEASED);
expectThrows(AssertionError.class, () -> assertUnknownVersion(Version.CURRENT));
assertUnknownVersion(AliasFilter.V_5_1_0); // once 5.1.0 is released and added to Version.java, remove this constant
// once 5.0.0 is released and added to Version.java, remove this constant
assertUnknownVersion(ShardValidateQueryRequestTests.V_5_0_0);
}
public static void assertUnknownVersion(Version version) {
assertFalse("Version " + version + " has been releaed don't use a new instance of this version",
VersionUtils.allVersions().contains(version));
}
}
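The magic number 20000099 follows the usual version-id layout, where each pair of decimal digits encodes one component; this reading is inferred from the surrounding constants rather than stated anywhere in the diff:
// id = major * 1_000_000 + minor * 10_000 + revision * 100 + build
// 20000099 -> major 20, minor 0, revision 0, build 99 (99 conventionally marks an unreleased placeholder)
int id = 20 * 1_000_000 + 0 * 10_000 + 0 * 100 + 99;
assert id == 20000099;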

View File

@ -0,0 +1,112 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action;
import org.elasticsearch.action.explain.ExplainRequest;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.indices.IndicesModule;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.SearchRequestParsers;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Base64;
import java.util.Collections;
import java.util.List;
public class ExplainRequestTests extends ESTestCase {
protected NamedWriteableRegistry namedWriteableRegistry;
protected SearchRequestParsers searchRequestParsers;
public void setUp() throws Exception {
super.setUp();
IndicesModule indicesModule = new IndicesModule(Collections.emptyList());
SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList());
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
entries.addAll(indicesModule.getNamedWriteables());
entries.addAll(searchModule.getNamedWriteables());
namedWriteableRegistry = new NamedWriteableRegistry(entries);
searchRequestParsers = searchModule.getSearchRequestParsers();
}
public void testSerialize() throws IOException {
try (BytesStreamOutput output = new BytesStreamOutput()) {
ExplainRequest request = new ExplainRequest("index", "type", "id");
request.fetchSourceContext(new FetchSourceContext(true, new String[]{"field1.*"}, new String[] {"field2.*"}));
request.filteringAlias(new AliasFilter(QueryBuilders.termQuery("filter_field", "value"), new String[] {"alias0", "alias1"}));
request.preference("the_preference");
request.query(QueryBuilders.termQuery("field", "value"));
request.storedFields(new String[] {"field1", "field2"});
request.routing("some_routing");
request.writeTo(output);
try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) {
ExplainRequest readRequest = new ExplainRequest();
readRequest.readFrom(in);
assertEquals(request.filteringAlias(), readRequest.filteringAlias());
assertArrayEquals(request.storedFields(), readRequest.storedFields());
assertEquals(request.preference(), readRequest.preference());
assertEquals(request.query(), readRequest.query());
assertEquals(request.routing(), readRequest.routing());
assertEquals(request.fetchSourceContext(), readRequest.fetchSourceContext());
}
}
}
// BWC test for changes from #20916
public void testSerialize50Request() throws IOException {
ExplainRequest request = new ExplainRequest("index", "type", "id");
request.fetchSourceContext(new FetchSourceContext(true, new String[]{"field1.*"}, new String[] {"field2.*"}));
request.filteringAlias(new AliasFilter(QueryBuilders.termQuery("filter_field", "value"), new String[] {"alias0", "alias1"}));
request.preference("the_preference");
request.query(QueryBuilders.termQuery("field", "value"));
request.storedFields(new String[] {"field1", "field2"});
request.routing("some_routing");
BytesArray requestBytes = new BytesArray(Base64.getDecoder()
// this is a base64 encoded request generated with the same input
.decode("AAABBWluZGV4BHR5cGUCaWQBDHNvbWVfcm91dGluZwEOdGhlX3ByZWZlcmVuY2UEdGVybT" +
"+AAAAABWZpZWxkFQV2YWx1ZQIGYWxpYXMwBmFsaWFzMQECBmZpZWxkMQZmaWVsZDIBAQEIZmllbGQxLioBCGZpZWxkMi4qAA"));
try (StreamInput in = new NamedWriteableAwareStreamInput(requestBytes.streamInput(), namedWriteableRegistry)) {
in.setVersion(ShardValidateQueryRequestTests.V_5_0_0);
ExplainRequest readRequest = new ExplainRequest();
readRequest.readFrom(in);
assertEquals(0, in.available());
assertArrayEquals(request.filteringAlias().getAliases(), readRequest.filteringAlias().getAliases());
expectThrows(IllegalStateException.class, () -> readRequest.filteringAlias().getQueryBuilder());
assertArrayEquals(request.storedFields(), readRequest.storedFields());
assertEquals(request.preference(), readRequest.preference());
assertEquals(request.query(), readRequest.query());
assertEquals(request.routing(), readRequest.routing());
assertEquals(request.fetchSourceContext(), readRequest.fetchSourceContext());
BytesStreamOutput output = new BytesStreamOutput();
output.setVersion(ShardValidateQueryRequestTests.V_5_0_0);
readRequest.writeTo(output);
assertEquals(output.bytes().toBytesRef(), requestBytes.toBytesRef());
}
}
}
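Both BWC tests in this commit share the same round-trip shape: decode bytes captured from the old wire format, read them with the stream pinned to the old version, then re-serialize at that version and demand byte-for-byte equality. A condensed sketch of that pattern with hypothetical names (namedWriteableRegistry as set up in the tests; Streamable stands in for any request with readFrom/writeTo):
import java.io.IOException;
import org.elasticsearch.Version;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.Streamable;
// Bytes captured at version V must survive read + write at V unchanged.
void assertWireRoundTrip(BytesArray oldBytes, Version oldVersion, Streamable request) throws IOException {
    try (StreamInput in = new NamedWriteableAwareStreamInput(oldBytes.streamInput(), namedWriteableRegistry)) {
        in.setVersion(oldVersion);
        request.readFrom(in);
        BytesStreamOutput out = new BytesStreamOutput();
        out.setVersion(oldVersion);
        request.writeTo(out);
        assertEquals(out.bytes().toBytesRef(), oldBytes.toBytesRef());
    }
}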

View File

@ -0,0 +1,114 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.validate.query.ShardValidateQueryRequest;
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndicesModule;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.SearchRequestParsers;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Base64;
import java.util.Collections;
import java.util.List;
public class ShardValidateQueryRequestTests extends ESTestCase {
public static final Version V_5_0_0 = Version.fromId(5000099);
protected NamedWriteableRegistry namedWriteableRegistry;
protected SearchRequestParsers searchRequestParsers;
public void setUp() throws Exception {
super.setUp();
IndicesModule indicesModule = new IndicesModule(Collections.emptyList());
SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList());
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
entries.addAll(indicesModule.getNamedWriteables());
entries.addAll(searchModule.getNamedWriteables());
namedWriteableRegistry = new NamedWriteableRegistry(entries);
searchRequestParsers = searchModule.getSearchRequestParsers();
}
public void testSerialize() throws IOException {
try (BytesStreamOutput output = new BytesStreamOutput()) {
ValidateQueryRequest validateQueryRequest = new ValidateQueryRequest("indices");
validateQueryRequest.query(QueryBuilders.termQuery("field", "value"));
validateQueryRequest.rewrite(true);
validateQueryRequest.explain(false);
validateQueryRequest.types("type1", "type2");
ShardValidateQueryRequest request = new ShardValidateQueryRequest(new ShardId("index", "foobar", 1),
new AliasFilter(QueryBuilders.termQuery("filter_field", "value"), new String[] {"alias0", "alias1"}), validateQueryRequest);
request.writeTo(output);
try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) {
ShardValidateQueryRequest readRequest = new ShardValidateQueryRequest();
readRequest.readFrom(in);
assertEquals(request.filteringAliases(), readRequest.filteringAliases());
assertArrayEquals(request.types(), readRequest.types());
assertEquals(request.explain(), readRequest.explain());
assertEquals(request.query(), readRequest.query());
assertEquals(request.rewrite(), readRequest.rewrite());
assertEquals(request.shardId(), readRequest.shardId());
}
}
}
// BWC test for changes from #20916
public void testSerialize50Request() throws IOException {
ValidateQueryRequest validateQueryRequest = new ValidateQueryRequest("indices");
validateQueryRequest.query(QueryBuilders.termQuery("field", "value"));
validateQueryRequest.rewrite(true);
validateQueryRequest.explain(false);
validateQueryRequest.types("type1", "type2");
ShardValidateQueryRequest request = new ShardValidateQueryRequest(new ShardId("index", "foobar", 1),
new AliasFilter(QueryBuilders.termQuery("filter_field", "value"), new String[] {"alias0", "alias1"}), validateQueryRequest);
BytesArray requestBytes = new BytesArray(Base64.getDecoder()
// this is a base64 encoded request generated with the same input
.decode("AAVpbmRleAZmb29iYXIBAQdpbmRpY2VzBAR0ZXJtP4AAAAAFZmllbGQVBXZhbHVlAgV0eXBlMQV0eXBlMgIGYWxpYXMwBmFsaWFzMQABAA"));
try (StreamInput in = new NamedWriteableAwareStreamInput(requestBytes.streamInput(), namedWriteableRegistry)) {
in.setVersion(V_5_0_0);
ShardValidateQueryRequest readRequest = new ShardValidateQueryRequest();
readRequest.readFrom(in);
assertEquals(0, in.available());
assertArrayEquals(request.filteringAliases().getAliases(), readRequest.filteringAliases().getAliases());
expectThrows(IllegalStateException.class, () -> readRequest.filteringAliases().getQueryBuilder());
assertArrayEquals(request.types(), readRequest.types());
assertEquals(request.explain(), readRequest.explain());
assertEquals(request.query(), readRequest.query());
assertEquals(request.rewrite(), readRequest.rewrite());
assertEquals(request.shardId(), readRequest.shardId());
BytesStreamOutput output = new BytesStreamOutput();
output.setVersion(V_5_0_0);
readRequest.writeTo(output);
assertEquals(output.bytes().toBytesRef(), requestBytes.toBytesRef());
}
}
}

View File

@ -20,8 +20,8 @@
package org.elasticsearch.action.bulk;
import org.apache.lucene.util.Constants;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
@ -113,7 +113,7 @@ public class BulkRequestTests extends ESTestCase {
public void testBulkAddIterable() {
BulkRequest bulkRequest = Requests.bulkRequest();
List<ActionRequest<?>> requests = new ArrayList<>();
List<DocWriteRequest> requests = new ArrayList<>();
requests.add(new IndexRequest("test", "test", "id").source("field", "value"));
requests.add(new UpdateRequest("test", "test", "id").doc("field", "value"));
requests.add(new DeleteRequest("test", "test", "id"));

View File

@ -47,6 +47,7 @@ import java.util.Map;
import java.util.concurrent.CyclicBarrier;
import java.util.function.Function;
import static org.elasticsearch.action.DocWriteRequest.OpType;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.script.ScriptService.ScriptType;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
@ -309,7 +310,7 @@ public class BulkWithUpdatesIT extends ESIntegTestCase {
assertThat(response.getItems()[i].getVersion(), equalTo(1L));
assertThat(response.getItems()[i].getIndex(), equalTo("test"));
assertThat(response.getItems()[i].getType(), equalTo("type1"));
assertThat(response.getItems()[i].getOpType(), equalTo("update"));
assertThat(response.getItems()[i].getOpType(), equalTo(OpType.UPDATE));
assertThat(response.getItems()[i].getResponse().getId(), equalTo(Integer.toString(i)));
assertThat(response.getItems()[i].getResponse().getVersion(), equalTo(1L));
assertThat(((UpdateResponse) response.getItems()[i].getResponse()).getGetResult().field("counter").getValue(), equalTo(1));
@ -347,7 +348,7 @@ public class BulkWithUpdatesIT extends ESIntegTestCase {
assertThat(response.getItems()[i].getVersion(), equalTo(2L));
assertThat(response.getItems()[i].getIndex(), equalTo("test"));
assertThat(response.getItems()[i].getType(), equalTo("type1"));
assertThat(response.getItems()[i].getOpType(), equalTo("update"));
assertThat(response.getItems()[i].getOpType(), equalTo(OpType.UPDATE));
assertThat(response.getItems()[i].getResponse().getId(), equalTo(Integer.toString(i)));
assertThat(response.getItems()[i].getResponse().getVersion(), equalTo(2L));
assertThat(((UpdateResponse) response.getItems()[i].getResponse()).getGetResult().field("counter").getValue(), equalTo(2));
@ -371,7 +372,7 @@ public class BulkWithUpdatesIT extends ESIntegTestCase {
assertThat(response.getItems()[i].getVersion(), equalTo(3L));
assertThat(response.getItems()[i].getIndex(), equalTo("test"));
assertThat(response.getItems()[i].getType(), equalTo("type1"));
assertThat(response.getItems()[i].getOpType(), equalTo("update"));
assertThat(response.getItems()[i].getOpType(), equalTo(OpType.UPDATE));
}
}
@ -388,7 +389,7 @@ public class BulkWithUpdatesIT extends ESIntegTestCase {
assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(i)));
assertThat(response.getItems()[i].getIndex(), equalTo("test"));
assertThat(response.getItems()[i].getType(), equalTo("type1"));
assertThat(response.getItems()[i].getOpType(), equalTo("update"));
assertThat(response.getItems()[i].getOpType(), equalTo(OpType.UPDATE));
}
builder = client().prepareBulk();
@ -404,7 +405,7 @@ public class BulkWithUpdatesIT extends ESIntegTestCase {
assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(i)));
assertThat(response.getItems()[i].getIndex(), equalTo("test"));
assertThat(response.getItems()[i].getType(), equalTo("type1"));
assertThat(response.getItems()[i].getOpType(), equalTo("update"));
assertThat(response.getItems()[i].getOpType(), equalTo(OpType.UPDATE));
for (int j = 0; j < 5; j++) {
GetResponse getResponse = client().prepareGet("test", "type1", Integer.toString(i)).get();
assertThat(getResponse.isExists(), equalTo(false));
@ -747,12 +748,12 @@ public class BulkWithUpdatesIT extends ESIntegTestCase {
assertNoFailures(indexBulkItemResponse);
assertThat(bulkItemResponse.getItems().length, is(6));
assertThat(bulkItemResponse.getItems()[0].getOpType(), is("index"));
assertThat(bulkItemResponse.getItems()[1].getOpType(), is("index"));
assertThat(bulkItemResponse.getItems()[2].getOpType(), is("update"));
assertThat(bulkItemResponse.getItems()[3].getOpType(), is("update"));
assertThat(bulkItemResponse.getItems()[4].getOpType(), is("delete"));
assertThat(bulkItemResponse.getItems()[5].getOpType(), is("delete"));
assertThat(bulkItemResponse.getItems()[0].getOpType(), is(OpType.INDEX));
assertThat(bulkItemResponse.getItems()[1].getOpType(), is(OpType.INDEX));
assertThat(bulkItemResponse.getItems()[2].getOpType(), is(OpType.UPDATE));
assertThat(bulkItemResponse.getItems()[3].getOpType(), is(OpType.UPDATE));
assertThat(bulkItemResponse.getItems()[4].getOpType(), is(OpType.DELETE));
assertThat(bulkItemResponse.getItems()[5].getOpType(), is(OpType.DELETE));
}
private static String indexOrAlias() {
@ -797,9 +798,9 @@ public class BulkWithUpdatesIT extends ESIntegTestCase {
assertThat(bulkResponse.hasFailures(), is(true));
BulkItemResponse[] responseItems = bulkResponse.getItems();
assertThat(responseItems.length, is(3));
assertThat(responseItems[0].getOpType(), is("index"));
assertThat(responseItems[1].getOpType(), is("update"));
assertThat(responseItems[2].getOpType(), is("delete"));
assertThat(responseItems[0].getOpType(), is(OpType.INDEX));
assertThat(responseItems[1].getOpType(), is(OpType.UPDATE));
assertThat(responseItems[2].getOpType(), is(OpType.DELETE));
}
// issue 9821
@ -809,9 +810,9 @@ public class BulkWithUpdatesIT extends ESIntegTestCase {
.add(client().prepareUpdate().setIndex("INVALID.NAME").setType("type1").setId("1").setDoc("field", randomInt()))
.add(client().prepareDelete().setIndex("INVALID.NAME").setType("type1").setId("1")).get();
assertThat(bulkResponse.getItems().length, is(3));
assertThat(bulkResponse.getItems()[0].getOpType(), is("index"));
assertThat(bulkResponse.getItems()[1].getOpType(), is("update"));
assertThat(bulkResponse.getItems()[2].getOpType(), is("delete"));
assertThat(bulkResponse.getItems()[0].getOpType(), is(OpType.INDEX));
assertThat(bulkResponse.getItems()[1].getOpType(), is(OpType.UPDATE));
assertThat(bulkResponse.getItems()[2].getOpType(), is(OpType.DELETE));
}
}

View File

@ -20,6 +20,7 @@ package org.elasticsearch.action.bulk;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.DocWriteRequest.OpType;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.action.update.UpdateRequest;
@ -212,11 +213,11 @@ public class RetryTests extends ESTestCase {
}
private BulkItemResponse successfulResponse() {
return new BulkItemResponse(1, "update", new DeleteResponse());
return new BulkItemResponse(1, OpType.DELETE, new DeleteResponse());
}
private BulkItemResponse failedResponse() {
return new BulkItemResponse(1, "update", new BulkItemResponse.Failure("test", "test", "1", new EsRejectedExecutionException("pool full")));
return new BulkItemResponse(1, OpType.INDEX, new BulkItemResponse.Failure("test", "test", "1", new EsRejectedExecutionException("pool full")));
}
}
}

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.index;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.replication.ReplicationResponse;
import org.elasticsearch.common.unit.TimeValue;
@ -44,18 +45,24 @@ public class IndexRequestTests extends ESTestCase {
String createUpper = "CREATE";
String indexUpper = "INDEX";
assertThat(IndexRequest.OpType.fromString(create), equalTo(IndexRequest.OpType.CREATE));
assertThat(IndexRequest.OpType.fromString(index), equalTo(IndexRequest.OpType.INDEX));
assertThat(IndexRequest.OpType.fromString(createUpper), equalTo(IndexRequest.OpType.CREATE));
assertThat(IndexRequest.OpType.fromString(indexUpper), equalTo(IndexRequest.OpType.INDEX));
IndexRequest indexRequest = new IndexRequest("");
indexRequest.opType(create);
assertThat(indexRequest.opType() , equalTo(DocWriteRequest.OpType.CREATE));
indexRequest.opType(createUpper);
assertThat(indexRequest.opType() , equalTo(DocWriteRequest.OpType.CREATE));
indexRequest.opType(index);
assertThat(indexRequest.opType() , equalTo(DocWriteRequest.OpType.INDEX));
indexRequest.opType(indexUpper);
assertThat(indexRequest.opType() , equalTo(DocWriteRequest.OpType.INDEX));
}
public void testReadBogusString() {
try {
IndexRequest.OpType.fromString("foobar");
IndexRequest indexRequest = new IndexRequest("");
indexRequest.opType("foobar");
fail("Expected IllegalArgumentException");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), containsString("opType [foobar] not allowed"));
assertThat(e.getMessage(), equalTo("opType must be 'create' or 'index', found: [foobar]"));
}
}
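The string-based OpType checks above give way to the shared DocWriteRequest.OpType enum used throughout this commit. From its call sites (OpType.INDEX, getOpType(), lowercase()), it behaves roughly like this sketch; simplified, since the real enum also carries a byte id for the wire format:
import java.util.Locale;
// Simplified sketch of the shared op-type enum surfaced by bulk item responses.
enum OpType {
    INDEX, CREATE, UPDATE, DELETE;
    String lowercase() {
        return name().toLowerCase(Locale.ROOT);
    }
}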

View File

@ -20,7 +20,7 @@ package org.elasticsearch.action.ingest;
*/
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
@ -116,10 +116,10 @@ public class BulkRequestModifierTests extends ESTestCase {
});
List<BulkItemResponse> originalResponses = new ArrayList<>();
for (ActionRequest actionRequest : bulkRequest.requests()) {
for (DocWriteRequest actionRequest : bulkRequest.requests()) {
IndexRequest indexRequest = (IndexRequest) actionRequest;
IndexResponse indexResponse = new IndexResponse(new ShardId("index", "_na_", 0), indexRequest.type(), indexRequest.id(), 1, true);
originalResponses.add(new BulkItemResponse(Integer.parseInt(indexRequest.id()), indexRequest.opType().lowercase(), indexResponse));
originalResponses.add(new BulkItemResponse(Integer.parseInt(indexRequest.id()), indexRequest.opType(), indexResponse));
}
bulkResponseListener.onResponse(new BulkResponse(originalResponses.toArray(new BulkItemResponse[originalResponses.size()]), 0));

View File

@ -20,7 +20,7 @@
package org.elasticsearch.action.ingest;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.bulk.BulkAction;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.delete.DeleteRequest;
@ -174,7 +174,7 @@ public class IngestActionFilterTests extends ESTestCase {
int numRequest = scaledRandomIntBetween(8, 64);
for (int i = 0; i < numRequest; i++) {
if (rarely()) {
ActionRequest request;
DocWriteRequest request;
if (randomBoolean()) {
request = new DeleteRequest("_index", "_type", "_id");
} else {
@ -196,7 +196,7 @@ public class IngestActionFilterTests extends ESTestCase {
verifyZeroInteractions(actionListener);
int assertedRequests = 0;
for (ActionRequest actionRequest : bulkRequest.requests()) {
for (DocWriteRequest actionRequest : bulkRequest.requests()) {
if (actionRequest instanceof IndexRequest) {
IndexRequest indexRequest = (IndexRequest) actionRequest;
assertThat(indexRequest.sourceAsMap().size(), equalTo(2));

View File

@ -0,0 +1,211 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.search;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.PlainShardIterator;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
public class SearchAsyncActionTests extends ESTestCase {
public void testFanOutAndCollect() throws InterruptedException {
SearchRequest request = new SearchRequest();
CountDownLatch latch = new CountDownLatch(1);
AtomicReference<TestSearchResponse> response = new AtomicReference<>();
ActionListener<SearchResponse> responseListener = new ActionListener<SearchResponse>() {
@Override
public void onResponse(SearchResponse searchResponse) {
response.set((TestSearchResponse) searchResponse);
}
@Override
public void onFailure(Exception e) {
logger.warn("test failed", e);
fail(e.getMessage());
}
};
DiscoveryNode primaryNode = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Version.CURRENT);
DiscoveryNode replicaNode = new DiscoveryNode("node_2", buildNewFakeTransportAddress(), Version.CURRENT);
Map<DiscoveryNode, Set<Long>> nodeToContextMap = new HashMap<>();
AtomicInteger contextIdGenerator = new AtomicInteger(0);
GroupShardsIterator shardsIter = getShardsIter("idx", randomIntBetween(1, 10), randomBoolean(), primaryNode, replicaNode);
AtomicInteger numFreedContext = new AtomicInteger();
SearchTransportService transportService = new SearchTransportService(Settings.EMPTY, null) {
@Override
public void sendFreeContext(DiscoveryNode node, long contextId, SearchRequest request) {
numFreedContext.incrementAndGet();
assertTrue(nodeToContextMap.containsKey(node));
assertTrue(nodeToContextMap.get(node).remove(contextId));
}
};
Map<String, DiscoveryNode> lookup = new HashMap<>();
lookup.put(primaryNode.getId(), primaryNode);
AbstractSearchAsyncAction asyncAction = new AbstractSearchAsyncAction<TestSearchPhaseResult>(logger, transportService, lookup::get,
Collections.emptyMap(), null, request, responseListener, shardsIter, 0, 0) {
TestSearchResponse response = new TestSearchResponse();
@Override
protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, ActionListener listener) {
assertTrue("shard: " + request.shardId() + " has been queried twice", response.queried.add(request.shardId()));
TestSearchPhaseResult testSearchPhaseResult = new TestSearchPhaseResult(contextIdGenerator.incrementAndGet(), node);
Set<Long> ids = nodeToContextMap.computeIfAbsent(node, (n) -> new HashSet<>());
ids.add(testSearchPhaseResult.id);
if (randomBoolean()) {
listener.onResponse(testSearchPhaseResult);
} else {
new Thread(() -> listener.onResponse(testSearchPhaseResult)).start();
}
}
@Override
protected void moveToSecondPhase() throws Exception {
for (int i = 0; i < firstResults.length(); i++) {
TestSearchPhaseResult result = firstResults.get(i);
assertEquals(result.node.getId(), result.shardTarget().getNodeId());
sendReleaseSearchContext(result.id(), result.node);
}
responseListener.onResponse(response);
latch.countDown();
}
@Override
protected String firstPhaseName() {
return "test";
}
@Override
protected Executor getExecutor() {
fail("no executor in this class");
return null;
}
};
asyncAction.start();
latch.await();
assertNotNull(response.get());
assertFalse(nodeToContextMap.isEmpty());
assertTrue(nodeToContextMap.containsKey(primaryNode));
assertEquals(shardsIter.size(), numFreedContext.get());
assertTrue(nodeToContextMap.get(primaryNode).toString(), nodeToContextMap.get(primaryNode).isEmpty());
}
private GroupShardsIterator getShardsIter(String index, int numShards, boolean doReplicas, DiscoveryNode primaryNode,
DiscoveryNode replicaNode) {
ArrayList<ShardIterator> list = new ArrayList<>();
for (int i = 0; i < numShards; i++) {
ArrayList<ShardRouting> started = new ArrayList<>();
ArrayList<ShardRouting> initializing = new ArrayList<>();
ArrayList<ShardRouting> unassigned = new ArrayList<>();
ShardRouting routing = ShardRouting.newUnassigned(new ShardId(new Index(index, "_na_"), i), true,
RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foobar"));
routing = routing.initialize(primaryNode.getId(), i + "p", 0);
routing.started();
started.add(routing);
if (doReplicas) {
routing = ShardRouting.newUnassigned(new ShardId(new Index(index, "_na_"), i), false,
RecoverySource.PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foobar"));
if (replicaNode != null) {
routing = routing.initialize(replicaNode.getId(), i + "r", 0);
if (randomBoolean()) {
routing.started();
started.add(routing);
} else {
initializing.add(routing);
}
} else {
unassigned.add(routing); // unused yet
}
}
Collections.shuffle(started, random());
started.addAll(initializing);
list.add(new PlainShardIterator(new ShardId(new Index(index, "_na_"), i), started));
}
return new GroupShardsIterator(list);
}
public static class TestSearchResponse extends SearchResponse {
public final Set<ShardId> queried = new HashSet<>();
}
public static class TestSearchPhaseResult implements SearchPhaseResult {
final long id;
final DiscoveryNode node;
SearchShardTarget shardTarget;
public TestSearchPhaseResult(long id, DiscoveryNode node) {
this.id = id;
this.node = node;
}
@Override
public long id() {
return id;
}
@Override
public SearchShardTarget shardTarget() {
return this.shardTarget;
}
@Override
public void shardTarget(SearchShardTarget shardTarget) {
this.shardTarget = shardTarget;
}
@Override
public void readFrom(StreamInput in) throws IOException {
}
@Override
public void writeTo(StreamOutput out) throws IOException {
}
}
}

View File

@ -0,0 +1,50 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.cache;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.test.ESTestCase;
import static org.hamcrest.Matchers.containsString;
public class CacheBuilderTests extends ESTestCase {
public void testSettingExpireAfterAccess() {
IllegalArgumentException iae =
expectThrows(IllegalArgumentException.class, () -> CacheBuilder.builder().setExpireAfterAccess(TimeValue.MINUS_ONE));
assertThat(iae.getMessage(), containsString("expireAfterAccess <="));
iae = expectThrows(IllegalArgumentException.class, () -> CacheBuilder.builder().setExpireAfterAccess(TimeValue.ZERO));
assertThat(iae.getMessage(), containsString("expireAfterAccess <="));
final TimeValue timeValue = TimeValue.parseTimeValue(randomPositiveTimeValue(), "");
Cache<Object, Object> cache = CacheBuilder.builder().setExpireAfterAccess(timeValue).build();
assertEquals(timeValue.getNanos(), cache.getExpireAfterAccessNanos());
}
public void testSettingExpireAfterWrite() {
IllegalArgumentException iae =
expectThrows(IllegalArgumentException.class, () -> CacheBuilder.builder().setExpireAfterWrite(TimeValue.MINUS_ONE));
assertThat(iae.getMessage(), containsString("expireAfterWrite <="));
iae = expectThrows(IllegalArgumentException.class, () -> CacheBuilder.builder().setExpireAfterWrite(TimeValue.ZERO));
assertThat(iae.getMessage(), containsString("expireAfterWrite <="));
final TimeValue timeValue = TimeValue.parseTimeValue(randomPositiveTimeValue(), "");
Cache<Object, Object> cache = CacheBuilder.builder().setExpireAfterWrite(timeValue).build();
assertEquals(timeValue.getNanos(), cache.getExpireAfterWriteNanos());
}
}
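Given the assertions above, the builder accepts a TimeValue, rejects anything not strictly positive, and the cache reports the setting back in nanoseconds. A typical use under those assumptions:
import org.elasticsearch.common.cache.Cache;
import org.elasticsearch.common.cache.CacheBuilder;
import org.elasticsearch.common.unit.TimeValue;
// Entries expire one minute after the last access.
Cache<String, String> cache = CacheBuilder.<String, String>builder()
        .setExpireAfterAccess(TimeValue.timeValueMinutes(1))
        .build();
assert cache.getExpireAfterAccessNanos() == TimeValue.timeValueMinutes(1).getNanos();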

View File

@ -228,7 +228,7 @@ public class CacheTests extends ESTestCase {
return now.get();
}
};
cache.setExpireAfterAccess(1);
cache.setExpireAfterAccessNanos(1);
List<Integer> evictedKeys = new ArrayList<>();
cache.setRemovalListener(notification -> {
assertEquals(RemovalNotification.RemovalReason.EVICTED, notification.getRemovalReason());
@ -265,7 +265,7 @@ public class CacheTests extends ESTestCase {
return now.get();
}
};
cache.setExpireAfterWrite(1);
cache.setExpireAfterWriteNanos(1);
List<Integer> evictedKeys = new ArrayList<>();
cache.setRemovalListener(notification -> {
assertEquals(RemovalNotification.RemovalReason.EVICTED, notification.getRemovalReason());
@ -307,7 +307,7 @@ public class CacheTests extends ESTestCase {
return now.get();
}
};
cache.setExpireAfterAccess(1);
cache.setExpireAfterAccessNanos(1);
now.set(0);
for (int i = 0; i < numberOfEntries; i++) {
cache.put(i, Integer.toString(i));

View File

@ -29,6 +29,8 @@ import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
@ -113,6 +115,82 @@ public class ObjectParserTests extends ESTestCase {
}
/**
* This test ensures we can use a classic pull-parsing parser
* together with the object parser
*/
public void testUseClassicPullParsingSubParser() throws IOException {
class ClassicParser {
URI parseURI(XContentParser parser) throws IOException {
String fieldName = null;
String host = "";
int port = 0;
XContentParser.Token token;
while (( token = parser.currentToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
fieldName = parser.currentName();
} else if (token == XContentParser.Token.VALUE_STRING){
if (fieldName.equals("host")) {
host = parser.text();
} else {
throw new IllegalStateException("boom");
}
} else if (token == XContentParser.Token.VALUE_NUMBER){
if (fieldName.equals("port")) {
port = parser.intValue();
} else {
throw new IllegalStateException("boom");
}
}
parser.nextToken();
}
return URI.create(host + ":" + port);
}
}
class Foo {
public String name;
public URI uri;
public void setName(String name) {
this.name = name;
}
public void setURI(URI uri) {
this.uri = uri;
}
}
class CustomParseFieldMatchSupplier implements ParseFieldMatcherSupplier {
public final ClassicParser parser;
CustomParseFieldMatchSupplier(ClassicParser parser) {
this.parser = parser;
}
@Override
public ParseFieldMatcher getParseFieldMatcher() {
return ParseFieldMatcher.EMPTY;
}
public URI parseURI(XContentParser parser) {
try {
return this.parser.parseURI(parser);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
}
XContentParser parser = XContentType.JSON.xContent()
.createParser("{\"url\" : { \"host\": \"http://foobar\", \"port\" : 80}, \"name\" : \"foobarbaz\"}");
ObjectParser<Foo, CustomParseFieldMatchSupplier> objectParser = new ObjectParser<>("foo");
objectParser.declareString(Foo::setName, new ParseField("name"));
objectParser.declareObjectOrDefault(Foo::setURI, (p, s) -> s.parseURI(p), () -> null, new ParseField("url"));
Foo s = objectParser.parse(parser, new Foo(), new CustomParseFieldMatchSupplier(new ClassicParser()));
assertEquals(s.uri.getHost(), "foobar");
assertEquals(s.uri.getPort(), 80);
assertEquals(s.name, "foobarbaz");
}
public void testExceptions() throws IOException {
XContentParser parser = XContentType.JSON.xContent().createParser("{\"test\" : \"foo\"}");
class TestStruct {

View File

@ -37,6 +37,7 @@ import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
import java.io.IOException;
import static org.elasticsearch.action.DocWriteRequest.OpType;
import static org.elasticsearch.client.Requests.clearIndicesCacheRequest;
import static org.elasticsearch.client.Requests.getRequest;
import static org.elasticsearch.client.Requests.indexRequest;
@ -191,31 +192,31 @@ public class DocumentActionsIT extends ESIntegTestCase {
assertThat(bulkResponse.getItems().length, equalTo(5));
assertThat(bulkResponse.getItems()[0].isFailed(), equalTo(false));
assertThat(bulkResponse.getItems()[0].getOpType(), equalTo("index"));
assertThat(bulkResponse.getItems()[0].getOpType(), equalTo(OpType.INDEX));
assertThat(bulkResponse.getItems()[0].getIndex(), equalTo(getConcreteIndexName()));
assertThat(bulkResponse.getItems()[0].getType(), equalTo("type1"));
assertThat(bulkResponse.getItems()[0].getId(), equalTo("1"));
assertThat(bulkResponse.getItems()[1].isFailed(), equalTo(false));
assertThat(bulkResponse.getItems()[1].getOpType(), equalTo("create"));
assertThat(bulkResponse.getItems()[1].getOpType(), equalTo(OpType.CREATE));
assertThat(bulkResponse.getItems()[1].getIndex(), equalTo(getConcreteIndexName()));
assertThat(bulkResponse.getItems()[1].getType(), equalTo("type1"));
assertThat(bulkResponse.getItems()[1].getId(), equalTo("2"));
assertThat(bulkResponse.getItems()[2].isFailed(), equalTo(false));
assertThat(bulkResponse.getItems()[2].getOpType(), equalTo("index"));
assertThat(bulkResponse.getItems()[2].getOpType(), equalTo(OpType.INDEX));
assertThat(bulkResponse.getItems()[2].getIndex(), equalTo(getConcreteIndexName()));
assertThat(bulkResponse.getItems()[2].getType(), equalTo("type1"));
String generatedId3 = bulkResponse.getItems()[2].getId();
assertThat(bulkResponse.getItems()[3].isFailed(), equalTo(false));
assertThat(bulkResponse.getItems()[3].getOpType(), equalTo("delete"));
assertThat(bulkResponse.getItems()[3].getOpType(), equalTo(OpType.DELETE));
assertThat(bulkResponse.getItems()[3].getIndex(), equalTo(getConcreteIndexName()));
assertThat(bulkResponse.getItems()[3].getType(), equalTo("type1"));
assertThat(bulkResponse.getItems()[3].getId(), equalTo("1"));
assertThat(bulkResponse.getItems()[4].isFailed(), equalTo(true));
assertThat(bulkResponse.getItems()[4].getOpType(), equalTo("index"));
assertThat(bulkResponse.getItems()[4].getOpType(), equalTo(OpType.INDEX));
assertThat(bulkResponse.getItems()[4].getIndex(), equalTo(getConcreteIndexName()));
assertThat(bulkResponse.getItems()[4].getType(), equalTo("type1"));

View File

@ -77,82 +77,6 @@ public class IndexServiceTests extends ESSingleNodeTestCase {
return new CompressedXContent(builder.string());
}
public void testFilteringAliases() throws Exception {
IndexService indexService = createIndex("test", Settings.EMPTY);
add(indexService, "cats", filter(termQuery("animal", "cat")));
add(indexService, "dogs", filter(termQuery("animal", "dog")));
add(indexService, "all", null);
assertThat(indexService.getMetaData().getAliases().containsKey("cats"), equalTo(true));
assertThat(indexService.getMetaData().getAliases().containsKey("dogs"), equalTo(true));
assertThat(indexService.getMetaData().getAliases().containsKey("turtles"), equalTo(false));
assertThat(indexService.aliasFilter(indexService.newQueryShardContext(), "cats").toString(), equalTo("animal:cat"));
assertThat(indexService.aliasFilter(indexService.newQueryShardContext(), "cats", "dogs").toString(), equalTo("animal:cat animal:dog"));
// Non-filtering alias should turn off all filters because filters are ORed
assertThat(indexService.aliasFilter(indexService.newQueryShardContext(), "all"), nullValue());
assertThat(indexService.aliasFilter(indexService.newQueryShardContext(), "cats", "all"), nullValue());
assertThat(indexService.aliasFilter(indexService.newQueryShardContext(), "all", "cats"), nullValue());
add(indexService, "cats", filter(termQuery("animal", "feline")));
add(indexService, "dogs", filter(termQuery("animal", "canine")));
assertThat(indexService.aliasFilter(indexService.newQueryShardContext(), "dogs", "cats").toString(), equalTo("animal:canine animal:feline"));
}
public void testAliasFilters() throws Exception {
IndexService indexService = createIndex("test", Settings.EMPTY);
add(indexService, "cats", filter(termQuery("animal", "cat")));
add(indexService, "dogs", filter(termQuery("animal", "dog")));
assertThat(indexService.aliasFilter(indexService.newQueryShardContext()), nullValue());
assertThat(indexService.aliasFilter(indexService.newQueryShardContext(), "dogs").toString(), equalTo("animal:dog"));
assertThat(indexService.aliasFilter(indexService.newQueryShardContext(), "dogs", "cats").toString(), equalTo("animal:dog animal:cat"));
add(indexService, "cats", filter(termQuery("animal", "feline")));
add(indexService, "dogs", filter(termQuery("animal", "canine")));
assertThat(indexService.aliasFilter(indexService.newQueryShardContext(), "dogs", "cats").toString(), equalTo("animal:canine animal:feline"));
}
public void testRemovedAliasFilter() throws Exception {
IndexService indexService = createIndex("test", Settings.EMPTY);
add(indexService, "cats", filter(termQuery("animal", "cat")));
remove(indexService, "cats");
try {
indexService.aliasFilter(indexService.newQueryShardContext(), "cats");
fail("Expected InvalidAliasNameException");
} catch (InvalidAliasNameException e) {
assertThat(e.getMessage(), containsString("Invalid alias name [cats]"));
}
}
public void testUnknownAliasFilter() throws Exception {
IndexService indexService = createIndex("test", Settings.EMPTY);
add(indexService, "cats", filter(termQuery("animal", "cat")));
add(indexService, "dogs", filter(termQuery("animal", "dog")));
try {
indexService.aliasFilter(indexService.newQueryShardContext(), "unknown");
fail();
} catch (InvalidAliasNameException e) {
// all is well
}
}
private void remove(IndexService service, String alias) {
IndexMetaData build = IndexMetaData.builder(service.getMetaData()).removeAlias(alias).build();
service.updateMetaData(build);
}
private void add(IndexService service, String alias, @Nullable CompressedXContent filter) {
IndexMetaData build = IndexMetaData.builder(service.getMetaData()).putAlias(AliasMetaData.builder(alias).filter(filter).build()).build();
service.updateMetaData(build);
}
public void testBaseAsyncTask() throws InterruptedException, IOException {
IndexService indexService = createIndex("test", Settings.EMPTY);
AtomicReference<CountDownLatch> latch = new AtomicReference<>(new CountDownLatch(1));

View File

@ -26,6 +26,7 @@ import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.script.ScriptService;
@ -81,8 +82,8 @@ public class SearchSlowLogTests extends ESSingleNodeTestCase {
}
@Override
public String[] filteringAliases() {
return new String[0];
public QueryBuilder filteringAliases() {
return null;
}
@Override

View File

@ -84,7 +84,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
assertThat(doc.rootDoc().getField("point.lon").fieldType().stored(), is(stored));
assertThat(doc.rootDoc().getField("point.geohash"), nullValue());
if (indexCreatedBefore22 == true) {
if (indexCreatedBefore22) {
assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
} else {
assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(GeoPointField.encodeLatLon(1.2, 1.3)));

View File

@ -0,0 +1,43 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.mapper;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESTestCase;
public class MapperTests extends ESTestCase {
public void testSuccessfulBuilderContext() {
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
ContentPath contentPath = new ContentPath(1);
Mapper.BuilderContext context = new Mapper.BuilderContext(settings, contentPath);
assertEquals(settings, context.indexSettings());
assertEquals(contentPath, context.path());
}
public void testBuilderContextWithIndexSettingsAsNull() {
expectThrows(NullPointerException.class, () -> new Mapper.BuilderContext(null, new ContentPath(1)));
}
}

View File

@ -101,7 +101,8 @@ public class MultiMatchQueryTests extends ESSingleNodeTestCase {
Term[] terms = new Term[] { new Term("foo", "baz"), new Term("bar", "baz") };
float[] boosts = new float[] {2, 3};
Query expected = BlendedTermQuery.booleanBlendedQuery(terms, boosts, false);
Query actual = MultiMatchQuery.blendTerm(new BytesRef("baz"), null, 1f, new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3));
Query actual = MultiMatchQuery.blendTerm(indexService.newQueryShardContext(), new BytesRef("baz"), null, 1f,
new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3));
assertEquals(expected, actual);
}
@ -115,7 +116,8 @@ public class MultiMatchQueryTests extends ESSingleNodeTestCase {
Term[] terms = new Term[] { new Term("foo", "baz"), new Term("bar", "baz") };
float[] boosts = new float[] {200, 30};
Query expected = BlendedTermQuery.booleanBlendedQuery(terms, boosts, false);
Query actual = MultiMatchQuery.blendTerm(new BytesRef("baz"), null, 1f, new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3));
Query actual = MultiMatchQuery.blendTerm(indexService.newQueryShardContext(), new BytesRef("baz"), null, 1f,
new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3));
assertEquals(expected, actual);
}
@ -132,7 +134,8 @@ public class MultiMatchQueryTests extends ESSingleNodeTestCase {
Term[] terms = new Term[] { new Term("foo", "baz") };
float[] boosts = new float[] {2};
Query expected = BlendedTermQuery.booleanBlendedQuery(terms, boosts, false);
Query actual = MultiMatchQuery.blendTerm(new BytesRef("baz"), null, 1f, new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3));
Query actual = MultiMatchQuery.blendTerm(indexService.newQueryShardContext(), new BytesRef("baz"), null, 1f,
new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3));
assertEquals(expected, actual);
}
@ -154,7 +157,8 @@ public class MultiMatchQueryTests extends ESSingleNodeTestCase {
.add(expectedClause1, Occur.SHOULD)
.add(expectedClause2, Occur.SHOULD)
.build();
Query actual = MultiMatchQuery.blendTerm(new BytesRef("baz"), null, 1f, new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3));
Query actual = MultiMatchQuery.blendTerm(indexService.newQueryShardContext(), new BytesRef("baz"), null, 1f,
new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3));
assertEquals(expected, actual);
}

View File

@ -23,10 +23,10 @@ import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.indices.InvalidIndexNameException;
@ -34,6 +34,8 @@ import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.InternalSettingsPlugin;
import org.elasticsearch.test.VersionUtils;
import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
import org.elasticsearch.test.junit.annotations.TestLogging;
import java.util.ArrayList;
import java.util.Collection;
@ -47,7 +49,6 @@ import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicIntegerArray;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
@ -57,6 +58,8 @@ public class IndexActionIT extends ESIntegTestCase {
* This test tries to simulate load while creating an index and indexing documents
* while the index is being created.
*/
@TestLogging("_root:DEBUG,org.elasticsearch.index.shard.IndexShard:TRACE,org.elasticsearch.action.search:TRACE")
public void testAutoGenerateIdNoDuplicates() throws Exception {
int numberOfIterations = scaledRandomIntBetween(10, 50);
for (int i = 0; i < numberOfIterations; i++) {
@ -66,7 +69,7 @@ public class IndexActionIT extends ESIntegTestCase {
logger.info("indexing [{}] docs", numOfDocs);
List<IndexRequestBuilder> builders = new ArrayList<>(numOfDocs);
for (int j = 0; j < numOfDocs; j++) {
builders.add(client().prepareIndex("test", "type").setSource("field", "value"));
builders.add(client().prepareIndex("test", "type").setSource("field", "value_" + j));
}
indexRandom(true, builders);
logger.info("verifying indexed content");
@ -74,7 +77,13 @@ public class IndexActionIT extends ESIntegTestCase {
for (int j = 0; j < numOfChecks; j++) {
try {
logger.debug("running search with all types");
assertHitCount(client().prepareSearch("test").get(), numOfDocs);
SearchResponse response = client().prepareSearch("test").get();
if (response.getHits().totalHits() != numOfDocs) {
final String message = "Count is " + response.getHits().totalHits() + " but " + numOfDocs + " was expected. "
+ ElasticsearchAssertions.formatShardStatus(response);
logger.error("{}. search response: \n{}", message, response);
fail(message);
}
} catch (Exception e) {
logger.error("search for all docs types failed", e);
if (firstError == null) {
@ -83,7 +92,13 @@ public class IndexActionIT extends ESIntegTestCase {
}
try {
logger.debug("running search with a specific type");
assertHitCount(client().prepareSearch("test").setTypes("type").get(), numOfDocs);
SearchResponse response = client().prepareSearch("test").setTypes("type").get();
if (response.getHits().totalHits() != numOfDocs) {
final String message = "Count is " + response.getHits().totalHits() + " but " + numOfDocs + " was expected. "
+ ElasticsearchAssertions.formatShardStatus(response);
logger.error("{}. search response: \n{}", message, response);
fail(message);
}
} catch (Exception e) {
logger.error("search for all docs of a specific type failed", e);
if (firstError == null) {

View File

@ -19,9 +19,11 @@
package org.elasticsearch.indices;
import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
@ -30,6 +32,7 @@ import org.elasticsearch.test.ESIntegTestCase;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.chrono.ISOChronology;
import org.joda.time.format.DateTimeFormat;
import java.util.List;
@ -441,4 +444,55 @@ public class IndicesRequestCacheIT extends ESIntegTestCase {
equalTo(5L));
}
public void testCacheWithFilteredAlias() {
assertAcked(client().admin().indices().prepareCreate("index").addMapping("type", "created_at", "type=date")
.setSettings(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true, IndexMetaData.SETTING_NUMBER_OF_SHARDS,
1, IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
.addAlias(new Alias("last_week").filter(QueryBuilders.rangeQuery("created_at").gte("now-7d/d")))
.get());
DateTime now = new DateTime(DateTimeZone.UTC);
client().prepareIndex("index", "type", "1").setRouting("1").setSource("created_at",
DateTimeFormat.forPattern("YYYY-MM-dd").print(now)).get();
refresh();
assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
equalTo(0L));
assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),
equalTo(0L));
SearchResponse r1 = client().prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0)
.setQuery(QueryBuilders.rangeQuery("created_at").gte("now-7d/d")).get();
assertSearchResponse(r1);
assertThat(r1.getHits().getTotalHits(), equalTo(1L));
assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
equalTo(0L));
assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),
equalTo(1L));
r1 = client().prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0)
.setQuery(QueryBuilders.rangeQuery("created_at").gte("now-7d/d")).get();
assertSearchResponse(r1);
assertThat(r1.getHits().getTotalHits(), equalTo(1L));
assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
equalTo(1L));
assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),
equalTo(1L));
r1 = client().prepareSearch("last_week").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).get();
assertSearchResponse(r1);
assertThat(r1.getHits().getTotalHits(), equalTo(1L));
assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
equalTo(1L));
assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),
equalTo(2L));
r1 = client().prepareSearch("last_week").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).get();
assertSearchResponse(r1);
assertThat(r1.getHits().getTotalHits(), equalTo(1L));
assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
equalTo(2L));
assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),
equalTo(2L));
}
}
View File
@ -21,7 +21,7 @@ package org.elasticsearch.ingest;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
@ -317,7 +317,7 @@ public class PipelineExecutionServiceTests extends ESTestCase {
int numRequest = scaledRandomIntBetween(8, 64);
int numIndexRequests = 0;
for (int i = 0; i < numRequest; i++) {
ActionRequest request;
DocWriteRequest request;
if (randomBoolean()) {
if (randomBoolean()) {
request = new DeleteRequest("_index", "_type", "_id");
View File
@ -20,6 +20,7 @@
package org.elasticsearch.routing;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.RoutingMissingException;
import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.bulk.BulkItemResponse;
@ -259,7 +260,7 @@ public class SimpleRoutingIT extends ESIntegTestCase {
for (BulkItemResponse bulkItemResponse : bulkResponse) {
assertThat(bulkItemResponse.isFailed(), equalTo(true));
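// getOpType() now returns the DocWriteRequest.OpType enum instead of a raw string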
assertThat(bulkItemResponse.getOpType(), equalTo("index"));
assertThat(bulkItemResponse.getOpType(), equalTo(DocWriteRequest.OpType.INDEX));
assertThat(bulkItemResponse.getFailure().getStatus(), equalTo(RestStatus.BAD_REQUEST));
assertThat(bulkItemResponse.getFailure().getCause(), instanceOf(RoutingMissingException.class));
assertThat(bulkItemResponse.getFailureMessage(), containsString("routing is required for [test]/[type1]/[1]"));
@ -280,7 +281,7 @@ public class SimpleRoutingIT extends ESIntegTestCase {
for (BulkItemResponse bulkItemResponse : bulkResponse) {
assertThat(bulkItemResponse.isFailed(), equalTo(true));
assertThat(bulkItemResponse.getOpType(), equalTo("update"));
assertThat(bulkItemResponse.getOpType(), equalTo(DocWriteRequest.OpType.UPDATE));
assertThat(bulkItemResponse.getFailure().getStatus(), equalTo(RestStatus.BAD_REQUEST));
assertThat(bulkItemResponse.getFailure().getCause(), instanceOf(RoutingMissingException.class));
assertThat(bulkItemResponse.getFailureMessage(), containsString("routing is required for [test]/[type1]/[1]"));
@ -301,7 +302,7 @@ public class SimpleRoutingIT extends ESIntegTestCase {
for (BulkItemResponse bulkItemResponse : bulkResponse) {
assertThat(bulkItemResponse.isFailed(), equalTo(true));
assertThat(bulkItemResponse.getOpType(), equalTo("delete"));
assertThat(bulkItemResponse.getOpType(), equalTo(DocWriteRequest.OpType.DELETE));
assertThat(bulkItemResponse.getFailure().getStatus(), equalTo(RestStatus.BAD_REQUEST));
assertThat(bulkItemResponse.getFailure().getCause(), instanceOf(RoutingMissingException.class));
assertThat(bulkItemResponse.getFailureMessage(), containsString("routing is required for [test]/[type1]/[1]"));
View File
@ -35,6 +35,7 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.indices.IndicesModule;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.SearchPlugin;
import org.elasticsearch.script.Script;
@ -68,6 +69,7 @@ public abstract class AbstractSearchTestCase extends ESTestCase {
protected NamedWriteableRegistry namedWriteableRegistry;
protected SearchRequestParsers searchRequestParsers;
private TestSearchExtPlugin searchExtPlugin;
protected IndicesQueriesRegistry queriesRegistry;
public void setUp() throws Exception {
super.setUp();
@ -79,6 +81,7 @@ public abstract class AbstractSearchTestCase extends ESTestCase {
entries.addAll(searchModule.getNamedWriteables());
namedWriteableRegistry = new NamedWriteableRegistry(entries);
searchRequestParsers = searchModule.getSearchRequestParsers();
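// expose the query registry so subclasses can build QueryParseContexts (e.g. to parse alias filters)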
queriesRegistry = searchModule.getQueryParserRegistry();
}
protected SearchSourceBuilder createSearchSourceBuilder() throws IOException {
View File
@ -27,6 +27,7 @@ import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
@ -41,6 +42,7 @@ import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.SearchPlugin;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.fetch.ShardFetchRequest;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.search.internal.ShardSearchLocalRequest;
import org.elasticsearch.search.query.QuerySearchResultProvider;
import org.elasticsearch.test.ESSingleNodeTestCase;
@ -173,7 +175,7 @@ public class SearchServiceTests extends ESSingleNodeTestCase {
try {
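// ShardSearchLocalRequest now carries an AliasFilter; an empty filter preserves the old behaviour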
QuerySearchResultProvider querySearchResultProvider = service.executeQueryPhase(
new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT,
new SearchSourceBuilder(), new String[0], false));
new SearchSourceBuilder(), new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY)));
IntArrayList intCursors = new IntArrayList(1);
intCursors.add(0);
ShardFetchRequest req = new ShardFetchRequest(querySearchResultProvider.id(), intCursors, null /* not a scroll */);
View File
@ -19,21 +19,51 @@
package org.elasticsearch.search.internal;
import org.elasticsearch.Version;
import org.elasticsearch.action.ShardValidateQueryRequestTests;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.cluster.metadata.AliasMetaData;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.RandomQueryBuilder;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.InvalidAliasNameException;
import org.elasticsearch.search.AbstractSearchTestCase;
import java.io.IOException;
import java.util.Base64;
import java.util.function.Function;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.nullValue;
public class ShardSearchTransportRequestTests extends AbstractSearchTestCase {
private IndexMetaData baseMetaData = IndexMetaData.builder("test").settings(Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build())
.numberOfShards(1).numberOfReplicas(1).build();
public void testSerialization() throws Exception {
ShardSearchTransportRequest shardSearchTransportRequest = createShardSearchTransportRequest();
@ -43,7 +73,7 @@ public class ShardSearchTransportRequestTests extends AbstractSearchTestCase {
ShardSearchTransportRequest deserializedRequest = new ShardSearchTransportRequest();
deserializedRequest.readFrom(in);
assertEquals(deserializedRequest.scroll(), shardSearchTransportRequest.scroll());
assertArrayEquals(deserializedRequest.filteringAliases(), shardSearchTransportRequest.filteringAliases());
assertEquals(deserializedRequest.filteringAliases(), shardSearchTransportRequest.filteringAliases());
assertArrayEquals(deserializedRequest.indices(), shardSearchTransportRequest.indices());
assertArrayEquals(deserializedRequest.types(), shardSearchTransportRequest.types());
assertEquals(deserializedRequest.indicesOptions(), shardSearchTransportRequest.indicesOptions());
@ -55,6 +85,7 @@ public class ShardSearchTransportRequestTests extends AbstractSearchTestCase {
assertEquals(deserializedRequest.numberOfShards(), shardSearchTransportRequest.numberOfShards());
assertEquals(deserializedRequest.cacheKey(), shardSearchTransportRequest.cacheKey());
assertNotSame(deserializedRequest, shardSearchTransportRequest);
assertEquals(deserializedRequest.filteringAliases(), shardSearchTransportRequest.filteringAliases());
}
}
}
@ -64,13 +95,129 @@ public class ShardSearchTransportRequestTests extends AbstractSearchTestCase {
ShardId shardId = new ShardId(randomAsciiOfLengthBetween(2, 10), randomAsciiOfLengthBetween(2, 10), randomInt());
ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, null, null, randomBoolean(), ShardRoutingState.UNASSIGNED,
new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "reason"));
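// randomly attach either a query-backed alias filter or an empty one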
String[] filteringAliases;
final AliasFilter filteringAliases;
if (randomBoolean()) {
filteringAliases = generateRandomStringArray(10, 10, false, false);
String[] strings = generateRandomStringArray(10, 10, false, false);
filteringAliases = new AliasFilter(RandomQueryBuilder.createQuery(random()), strings);
} else {
filteringAliases = Strings.EMPTY_ARRAY;
filteringAliases = new AliasFilter(null, Strings.EMPTY_ARRAY);
}
return new ShardSearchTransportRequest(searchRequest, shardRouting,
randomIntBetween(1, 100), filteringAliases, randomPositiveLong());
}
public void testFilteringAliases() throws Exception {
IndexMetaData indexMetaData = baseMetaData;
indexMetaData = add(indexMetaData, "cats", filter(termQuery("animal", "cat")));
indexMetaData = add(indexMetaData, "dogs", filter(termQuery("animal", "dog")));
indexMetaData = add(indexMetaData, "all", null);
assertThat(indexMetaData.getAliases().containsKey("cats"), equalTo(true));
assertThat(indexMetaData.getAliases().containsKey("dogs"), equalTo(true));
assertThat(indexMetaData.getAliases().containsKey("turtles"), equalTo(false));
assertEquals(aliasFilter(indexMetaData, "cats"), QueryBuilders.termQuery("animal", "cat"));
assertEquals(aliasFilter(indexMetaData, "cats", "dogs"), QueryBuilders.boolQuery().should(QueryBuilders.termQuery("animal", "cat"))
.should(QueryBuilders.termQuery("animal", "dog")));
// A non-filtering alias disables filtering entirely, because alias filters are combined with OR
assertThat(aliasFilter(indexMetaData, "all"), nullValue());
assertThat(aliasFilter(indexMetaData, "cats", "all"), nullValue());
assertThat(aliasFilter(indexMetaData, "all", "cats"), nullValue());
indexMetaData = add(indexMetaData, "cats", filter(termQuery("animal", "feline")));
indexMetaData = add(indexMetaData, "dogs", filter(termQuery("animal", "canine")));
assertEquals(aliasFilter(indexMetaData, "dogs", "cats"),QueryBuilders.boolQuery()
.should(QueryBuilders.termQuery("animal", "canine"))
.should(QueryBuilders.termQuery("animal", "feline")));
}
public void testRemovedAliasFilter() throws Exception {
IndexMetaData indexMetaData = baseMetaData;
indexMetaData = add(indexMetaData, "cats", filter(termQuery("animal", "cat")));
indexMetaData = remove(indexMetaData, "cats");
try {
aliasFilter(indexMetaData, "cats");
fail("Expected InvalidAliasNameException");
} catch (InvalidAliasNameException e) {
assertThat(e.getMessage(), containsString("Invalid alias name [cats]"));
}
}
public void testUnknownAliasFilter() throws Exception {
IndexMetaData indexMetaData = baseMetaData;
indexMetaData = add(indexMetaData, "cats", filter(termQuery("animal", "cat")));
indexMetaData = add(indexMetaData, "dogs", filter(termQuery("animal", "dog")));
IndexMetaData finalIndexMetaData = indexMetaData;
expectThrows(InvalidAliasNameException.class, () -> aliasFilter(finalIndexMetaData, "unknown"));
}
public static CompressedXContent filter(QueryBuilder filterBuilder) throws IOException {
XContentBuilder builder = XContentFactory.jsonBuilder();
filterBuilder.toXContent(builder, ToXContent.EMPTY_PARAMS);
builder.close();
return new CompressedXContent(builder.string());
}
private IndexMetaData remove(IndexMetaData indexMetaData, String alias) {
return IndexMetaData.builder(indexMetaData).removeAlias(alias).build();
}
private IndexMetaData add(IndexMetaData indexMetaData, String alias, @Nullable CompressedXContent filter) {
return IndexMetaData.builder(indexMetaData).putAlias(AliasMetaData.builder(alias).filter(filter).build()).build();
}
public QueryBuilder aliasFilter(IndexMetaData indexMetaData, String... aliasNames) {
Function<XContentParser, QueryParseContext> contextFactory = (p) -> new QueryParseContext(queriesRegistry,
p, new ParseFieldMatcher(Settings.EMPTY));
return ShardSearchRequest.parseAliasFilter(contextFactory, indexMetaData, aliasNames);
}
// BWC test for changes from #20916
public void testSerialize50Request() throws IOException {
BytesArray requestBytes = new BytesArray(Base64.getDecoder()
// a base64-encoded request generated from the same input, serialized at version 5.0.0
.decode("AAh4cXptdEhJcgdnT0d1ZldWyfL/sgQBJAHkDAMBAAIBAQ4TWlljWlZ5TkVmRU5xQnFQVHBjVBRZbUpod2pRV2dDSXVxRXpRaEdGVBRFZWFJY0plT2hn" +
"UEpISFhmSXR6Qw5XZ1hQcmFidWhWalFSQghuUWNwZ2JjQxBtZldRREJPaGF3UnlQSE56EVhQSUtRa25Iekh3bU5kbGVECWlFT2NIeEh3RgZIYXpMTWgUeGJq" +
"VU9Tdkdua3RORU5QZkNrb1EOalRyWGh5WXhvZ3plV2UUcWlXZFl2eUFUSXdPVGdMUUtYTHAJU3RKR3JxQkVJEkdEQ01xUHpnWWNaT3N3U3prSRIUeURlVFpM" +
"Q1lBZERZcWpDb3NOVWIST1NyQlZtdUNrd0F1UXRvdVRjEGp6RlVMd1dqc3VtUVNaTk0JT3N2cnpLQ3ZLBmRpS1J6cgdYbmVhZnBxBUlTUU9pEEJMcm1ERXVs" +
"eXhESlBoVkgTaWdUUmtVZGh4d0FFc2ZKRm9ZahNrb01XTnFFd2NWSVVDU3pWS2xBC3JVTWV3V2tUUWJUE3VGQU1Hd21CYUFMTmNQZkxobXUIZ3dxWHBxWXcF" +
"bmNDZUEOTFBSTEpYZVF6Z3d2eE0PV1BucUFacll6WWRxa1hCDGxkbXNMaVRzcUZXbAtSY0NsY3FNdlJQcv8BAP////8PAQAAARQAAQp5THlIcHdQeGtMAAAB" +
"AQAAAAEDbkVLAQMBCgACAAADAQABAAAAAQhIc25wRGxQbwEBQgABAAACAQMAAAEIAAAJMF9OSG9kSmh2HwABAwljRW5MVWxFbVQFemlxWG8KcXZQTkRUUGJk" +
"bgECCkpMbXVMT1dtVnkISEdUUHhsd0cBAAEJAAABA2lkcz+rKsUAAAAAAAAAAAECAQYAAgwxX0ZlRWxSQkhzQ07/////DwABAAEDCnRyYXFHR1hjVHkKTERY" +
"aE1HRWVySghuSWtzbEtXUwABCgEHSlRwQnhwdwAAAQECAgAAAAAAAQcyX3FlYmNDGQEEBklxZU9iUQdTc01Gek5YCWlMd2xuamNRQwNiVncAAUHt61kAAQR0" +
"ZXJtP4AAAAANbUtDSnpHU3lidm5KUBUMaVpqeG9vcm5QSFlvAAEBLGdtcWxuRWpWTXdvTlhMSHh0RWlFdHBnbEF1cUNmVmhoUVlwRFZxVllnWWV1A2ZvbwEA" +
"AQhwYWlubGVzc/8AALk4AAAAAAABAAAAAAAAAwpKU09PU0ZmWnhFClVqTGxMa2p3V2gKdUJwZ3R3dXFER5Hg97uT7MOmPgEADw"));
try (StreamInput in = new NamedWriteableAwareStreamInput(requestBytes.streamInput(), namedWriteableRegistry)) {
in.setVersion(ShardValidateQueryRequestTests.V_5_0_0);
ShardSearchTransportRequest readRequest = new ShardSearchTransportRequest();
readRequest.readFrom(in);
assertEquals(0, in.available());
IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, () -> readRequest.filteringAliases());
assertEquals("alias filter for aliases: [JSOOSFfZxE, UjLlLkjwWh, uBpgtwuqDG] must be rewritten first",
illegalStateException.getMessage());
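// register a filter for each serialized alias name so the request can be rewritten into a concrete query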
IndexMetaData.Builder indexMetadata = new IndexMetaData.Builder(baseMetaData)
.putAlias(AliasMetaData.newAliasMetaDataBuilder("JSOOSFfZxE").filter("{\"term\" : {\"foo\" : \"bar\"}}"))
.putAlias(AliasMetaData.newAliasMetaDataBuilder("UjLlLkjwWh").filter("{\"term\" : {\"foo\" : \"bar1\"}}"))
.putAlias(AliasMetaData.newAliasMetaDataBuilder("uBpgtwuqDG").filter("{\"term\" : {\"foo\" : \"bar2\"}}"));
IndexSettings indexSettings = new IndexSettings(indexMetadata.build(), Settings.EMPTY);
final long nowInMillis = randomPositiveLong();
QueryShardContext context = new QueryShardContext(
0, indexSettings, null, null, null, null, null, queriesRegistry, null, null, null,
() -> nowInMillis);
readRequest.rewrite(context);
QueryBuilder queryBuilder = readRequest.filteringAliases();
assertEquals(queryBuilder, QueryBuilders.boolQuery()
.should(QueryBuilders.termQuery("foo", "bar"))
.should(QueryBuilders.termQuery("foo", "bar1"))
.should(QueryBuilders.termQuery("foo", "bar2"))
);
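// writing back at version 5.0.0 must round-trip to the exact original bytes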
BytesStreamOutput output = new BytesStreamOutput();
output.setVersion(ShardValidateQueryRequestTests.V_5_0_0);
readRequest.writeTo(output);
assertEquals(output.bytes().toBytesRef(), requestBytes.toBytesRef());
}
}
}
Some files were not shown because too many files have changed in this diff