commit aecf51cb42
Author: Christoph Büscher
Date:   2016-03-01 17:20:15 +01:00

Merge branch 'master' into feature-suggest-refactoring

Conflicts:
	core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java

123 changed files with 10067 additions and 374 deletions

buildSrc/src/main/resources/checkstyle_suppressions.xml

@@ -746,7 +746,6 @@
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]MultiValueMode.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]SearchModule.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]SearchService.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]action[/\\]SearchServiceTransportAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]AggregatorFactories.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]AggregatorFactory.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]InternalAggregation.java" checks="LineLength" />

core/src/main/java/org/elasticsearch/action/ActionListener.java

@@ -21,18 +21,16 @@ package org.elasticsearch.action;
 /**
  * A listener for action responses or failures.
- *
- *
  */
 public interface ActionListener<Response> {

     /**
-     * A response handler.
+     * Handle action response. This response may constitute a failure or a
+     * success but it is up to the listener to make that decision.
      */
     void onResponse(Response response);

     /**
-     * A failure handler.
+     * A failure caused by an exception at some phase of the task.
      */
     void onFailure(Throwable e);
 }
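The new wording makes the contract explicit: onResponse delivers a result that may itself describe a failure, and checking is the listener's job. A minimal caller-side sketch (illustrative only, not part of this commit; BulkResponse is just a convenient example because it can carry per-item failures):

    ActionListener<BulkResponse> listener = new ActionListener<BulkResponse>() {
        @Override
        public void onResponse(BulkResponse response) {
            // A delivered response is not necessarily a success.
            if (response.hasFailures()) {
                System.err.println(response.buildFailureMessage());
            }
        }

        @Override
        public void onFailure(Throwable e) {
            // A hard failure in some phase of the task.
            e.printStackTrace();
        }
    };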

core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java

@@ -28,7 +28,9 @@ import org.elasticsearch.action.update.UpdateResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.StatusToXContent;
+import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentBuilderString;
 import org.elasticsearch.rest.RestStatus;
@@ -76,7 +78,15 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
     /**
      * Represents a failure.
      */
-    public static class Failure {
+    public static class Failure implements Writeable<Failure>, ToXContent {
+        static final String INDEX_FIELD = "index";
+        static final String TYPE_FIELD = "type";
+        static final String ID_FIELD = "id";
+        static final String CAUSE_FIELD = "cause";
+        static final String STATUS_FIELD = "status";
+
+        public static final Failure PROTOTYPE = new Failure(null, null, null, null);
+
         private final String index;
         private final String type;
         private final String id;
@@ -126,9 +136,39 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
             return this.status;
         }

+        /**
+         * The actual cause of the failure.
+         */
         public Throwable getCause() {
             return cause;
         }
+
+        @Override
+        public Failure readFrom(StreamInput in) throws IOException {
+            return new Failure(in.readString(), in.readString(), in.readOptionalString(), in.readThrowable());
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            out.writeString(getIndex());
+            out.writeString(getType());
+            out.writeOptionalString(getId());
+            out.writeThrowable(getCause());
+        }
+
+        @Override
+        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+            builder.field(INDEX_FIELD, index);
+            builder.field(TYPE_FIELD, type);
+            if (id != null) {
+                builder.field(ID_FIELD, id);
+            }
+            builder.startObject(CAUSE_FIELD);
+            ElasticsearchException.toXContent(builder, params, cause);
+            builder.endObject();
+            builder.field(STATUS_FIELD, status.getStatus());
+            return builder;
+        }
     }

     private int id;
@@ -265,11 +305,7 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
         }
         if (in.readBoolean()) {
-            String fIndex = in.readString();
-            String fType = in.readString();
-            String fId = in.readOptionalString();
-            Throwable throwable = in.readThrowable();
-            failure = new Failure(fIndex, fType, fId, throwable);
+            failure = Failure.PROTOTYPE.readFrom(in);
         }
     }
@@ -294,10 +330,7 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
             out.writeBoolean(false);
         } else {
             out.writeBoolean(true);
-            out.writeString(failure.getIndex());
-            out.writeString(failure.getType());
-            out.writeOptionalString(failure.getId());
-            out.writeThrowable(failure.getCause());
+            failure.writeTo(out);
         }
     }
 }
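Failure now follows the prototype-based Writeable pattern: a static PROTOTYPE instance whose readFrom acts as the deserialization factory, so the wire format lives in one place instead of being duplicated at every call site. A hedged round-trip sketch (the stream setup is illustrative; the Failure calls are the ones from the diff above):

    // Assumes an existing BulkItemResponse.Failure instance named "failure".
    BytesStreamOutput out = new BytesStreamOutput();
    failure.writeTo(out);                                   // index, type, optional id, throwable
    StreamInput in = StreamInput.wrap(out.bytes());
    BulkItemResponse.Failure copy = BulkItemResponse.Failure.PROTOTYPE.readFrom(in);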

core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java

@@ -94,6 +94,12 @@ public class BulkShardRequest extends ReplicationRequest<BulkShardRequest> {
     @Override
     public String toString() {
-        return "shard bulk {" + super.toString() + "}";
+        // This is included in error messages so we'll try to make it somewhat user friendly.
+        StringBuilder b = new StringBuilder("BulkShardRequest to [");
+        b.append(index).append("] containing [").append(items.length).append("] requests");
+        if (refresh) {
+            b.append(" and a refresh");
+        }
+        return b.toString();
     }
 }

core/src/main/java/org/elasticsearch/action/bulk/Retry.java

@@ -38,7 +38,7 @@ import java.util.function.Predicate;
 /**
  * Encapsulates synchronous and asynchronous retry logic.
  */
-class Retry {
+public class Retry {
     private final Class<? extends Throwable> retryOnThrowable;
     private BackoffPolicy backoffPolicy;
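Making Retry public exposes the bulk retry helper beyond org.elasticsearch.action.bulk. A hedged usage sketch, assuming the fluent Retry.on(...).policy(...) API of this era and an existing client and bulkRequest:

    import org.elasticsearch.action.bulk.BackoffPolicy;
    import org.elasticsearch.action.bulk.Retry;
    import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;

    // Retry the bulk request on thread-pool rejections, backing off exponentially.
    BulkResponse response = Retry
            .on(EsRejectedExecutionException.class)
            .policy(BackoffPolicy.exponentialBackoff())
            .withSyncBackoff(client, bulkRequest);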

core/src/main/java/org/elasticsearch/action/index/IndexRequest.java

@@ -223,6 +223,13 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
         return validationException;
     }

+    /**
+     * The content type that will be used when generating a document from user provided objects like Maps.
+     */
+    public XContentType getContentType() {
+        return contentType;
+    }
+
     /**
      * Sets the content type that will be used when generating a document from user provided objects (like Map).
      */
@@ -294,6 +301,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
         return this;
     }

+    @Override
     public String parent() {
         return this.parent;
     }
@@ -645,7 +653,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
     @Override
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
-        type = in.readString();
+        type = in.readOptionalString();
         id = in.readOptionalString();
         routing = in.readOptionalString();
         parent = in.readOptionalString();
@@ -663,7 +671,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
-        out.writeString(type);
+        out.writeOptionalString(type);
         out.writeOptionalString(id);
         out.writeOptionalString(routing);
         out.writeOptionalString(parent);
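The switch from writeString to writeOptionalString for the type is a wire-format change: an optional string is preceded by a presence byte, so a null type can now round-trip where the plain variant would have failed. A short sketch of the difference:

    out.writeOptionalString(null);          // fine: encodes an "absent" marker first
    String type = in.readOptionalString();  // may legitimately return null
    // out.writeString(null) would instead throw, since plain strings are required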

core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java

@@ -39,7 +39,7 @@ import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.util.concurrent.AtomicArray;
 import org.elasticsearch.search.SearchPhaseResult;
 import org.elasticsearch.search.SearchShardTarget;
-import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.search.action.SearchTransportService;
 import org.elasticsearch.search.controller.SearchPhaseController;
 import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
 import org.elasticsearch.search.internal.InternalSearchResponse;
@@ -58,7 +58,7 @@ import static org.elasticsearch.action.search.TransportSearchHelper.internalSear
 abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult> extends AbstractAsyncAction {

     protected final ESLogger logger;
-    protected final SearchServiceTransportAction searchService;
+    protected final SearchTransportService searchTransportService;
     private final IndexNameExpressionResolver indexNameExpressionResolver;
     protected final SearchPhaseController searchPhaseController;
     protected final ThreadPool threadPool;
@@ -76,12 +76,12 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
     private final Object shardFailuresMutex = new Object();
     protected volatile ScoreDoc[] sortedShardList;

-    protected AbstractSearchAsyncAction(ESLogger logger, SearchServiceTransportAction searchService, ClusterService clusterService,
+    protected AbstractSearchAsyncAction(ESLogger logger, SearchTransportService searchTransportService, ClusterService clusterService,
                                         IndexNameExpressionResolver indexNameExpressionResolver,
                                         SearchPhaseController searchPhaseController, ThreadPool threadPool, SearchRequest request,
                                         ActionListener<SearchResponse> listener) {
         this.logger = logger;
-        this.searchService = searchService;
+        this.searchTransportService = searchTransportService;
         this.indexNameExpressionResolver = indexNameExpressionResolver;
         this.searchPhaseController = searchPhaseController;
         this.threadPool = threadPool;
@@ -332,7 +332,7 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
     protected void sendReleaseSearchContext(long contextId, DiscoveryNode node) {
         if (node != null) {
-            searchService.sendFreeContext(node, contextId, request);
+            searchTransportService.sendFreeContext(node, contextId, request);
         }
     }

core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java

@@ -26,7 +26,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.util.concurrent.AtomicArray;
-import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.search.action.SearchTransportService;
 import org.elasticsearch.search.controller.SearchPhaseController;
 import org.elasticsearch.search.dfs.AggregatedDfs;
 import org.elasticsearch.search.dfs.DfsSearchResult;
@@ -43,11 +43,12 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<DfsSea

     private final AtomicArray<QueryFetchSearchResult> queryFetchResults;

-    SearchDfsQueryAndFetchAsyncAction(ESLogger logger, SearchServiceTransportAction searchService,
+    SearchDfsQueryAndFetchAsyncAction(ESLogger logger, SearchTransportService searchTransportService,
                                       ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
                                       SearchPhaseController searchPhaseController, ThreadPool threadPool,
                                       SearchRequest request, ActionListener<SearchResponse> listener) {
-        super(logger, searchService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool, request, listener);
+        super(logger, searchTransportService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool,
+                request, listener);
         queryFetchResults = new AtomicArray<>(firstResults.length());
     }
@@ -59,7 +60,7 @@
     @Override
     protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request,
                                          ActionListener<DfsSearchResult> listener) {
-        searchService.sendExecuteDfs(node, request, listener);
+        searchTransportService.sendExecuteDfs(node, request, listener);
     }

     @Override
@@ -77,7 +78,7 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<DfsSea
     void executeSecondPhase(final int shardIndex, final DfsSearchResult dfsResult, final AtomicInteger counter,
                             final DiscoveryNode node, final QuerySearchRequest querySearchRequest) {
-        searchService.sendExecuteFetch(node, querySearchRequest, new ActionListener<QueryFetchSearchResult>() {
+        searchTransportService.sendExecuteFetch(node, querySearchRequest, new ActionListener<QueryFetchSearchResult>() {
             @Override
             public void onResponse(QueryFetchSearchResult result) {
                 result.shardTarget(dfsResult.shardTarget());

core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java

@@ -29,7 +29,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.util.concurrent.AtomicArray;
 import org.elasticsearch.search.SearchShardTarget;
-import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.search.action.SearchTransportService;
 import org.elasticsearch.search.controller.SearchPhaseController;
 import org.elasticsearch.search.dfs.AggregatedDfs;
 import org.elasticsearch.search.dfs.DfsSearchResult;
@@ -50,11 +50,12 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSe
     final AtomicArray<FetchSearchResult> fetchResults;
     final AtomicArray<IntArrayList> docIdsToLoad;

-    SearchDfsQueryThenFetchAsyncAction(ESLogger logger, SearchServiceTransportAction searchService,
+    SearchDfsQueryThenFetchAsyncAction(ESLogger logger, SearchTransportService searchTransportService,
                                        ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
                                        SearchPhaseController searchPhaseController, ThreadPool threadPool,
                                        SearchRequest request, ActionListener<SearchResponse> listener) {
-        super(logger, searchService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool, request, listener);
+        super(logger, searchTransportService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool,
+                request, listener);
         queryResults = new AtomicArray<>(firstResults.length());
         fetchResults = new AtomicArray<>(firstResults.length());
         docIdsToLoad = new AtomicArray<>(firstResults.length());
@@ -68,7 +69,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSe
     @Override
     protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request,
                                          ActionListener<DfsSearchResult> listener) {
-        searchService.sendExecuteDfs(node, request, listener);
+        searchTransportService.sendExecuteDfs(node, request, listener);
     }

     @Override
@@ -85,7 +86,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSe
     void executeQuery(final int shardIndex, final DfsSearchResult dfsResult, final AtomicInteger counter,
                       final QuerySearchRequest querySearchRequest, final DiscoveryNode node) {
-        searchService.sendExecuteQuery(node, querySearchRequest, new ActionListener<QuerySearchResult>() {
+        searchTransportService.sendExecuteQuery(node, querySearchRequest, new ActionListener<QuerySearchResult>() {
             @Override
             public void onResponse(QuerySearchResult result) {
                 result.shardTarget(dfsResult.shardTarget());
@@ -157,7 +158,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSe
     void executeFetch(final int shardIndex, final SearchShardTarget shardTarget, final AtomicInteger counter,
                       final ShardFetchSearchRequest fetchSearchRequest, DiscoveryNode node) {
-        searchService.sendExecuteFetch(node, fetchSearchRequest, new ActionListener<FetchSearchResult>() {
+        searchTransportService.sendExecuteFetch(node, fetchSearchRequest, new ActionListener<FetchSearchResult>() {
             @Override
             public void onResponse(FetchSearchResult result) {
                 result.shardTarget(shardTarget);

core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java

@@ -25,7 +25,7 @@ import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.logging.ESLogger;
-import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.search.action.SearchTransportService;
 import org.elasticsearch.search.controller.SearchPhaseController;
 import org.elasticsearch.search.fetch.QueryFetchSearchResult;
 import org.elasticsearch.search.internal.InternalSearchResponse;
@@ -36,11 +36,12 @@ import java.io.IOException;
 class SearchQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<QueryFetchSearchResult> {

-    SearchQueryAndFetchAsyncAction(ESLogger logger, SearchServiceTransportAction searchService,
+    SearchQueryAndFetchAsyncAction(ESLogger logger, SearchTransportService searchTransportService,
                                    ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
                                    SearchPhaseController searchPhaseController, ThreadPool threadPool,
                                    SearchRequest request, ActionListener<SearchResponse> listener) {
-        super(logger, searchService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool, request, listener);
+        super(logger, searchTransportService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool,
+                request, listener);
     }

     @Override
@@ -51,7 +52,7 @@ class SearchQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<QueryFetc
     @Override
     protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request,
                                          ActionListener<QueryFetchSearchResult> listener) {
-        searchService.sendExecuteFetch(node, request, listener);
+        searchTransportService.sendExecuteFetch(node, request, listener);
     }

     @Override

core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java

@@ -29,7 +29,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.util.concurrent.AtomicArray;
 import org.elasticsearch.search.SearchShardTarget;
-import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.search.action.SearchTransportService;
 import org.elasticsearch.search.controller.SearchPhaseController;
 import org.elasticsearch.search.fetch.FetchSearchResult;
 import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
@@ -46,7 +46,7 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<QuerySea
     final AtomicArray<FetchSearchResult> fetchResults;
     final AtomicArray<IntArrayList> docIdsToLoad;

-    SearchQueryThenFetchAsyncAction(ESLogger logger, SearchServiceTransportAction searchService,
+    SearchQueryThenFetchAsyncAction(ESLogger logger, SearchTransportService searchService,
                                     ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
                                     SearchPhaseController searchPhaseController, ThreadPool threadPool,
                                     SearchRequest request, ActionListener<SearchResponse> listener) {
@@ -63,7 +63,7 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<QuerySea
     @Override
     protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request,
                                          ActionListener<QuerySearchResultProvider> listener) {
-        searchService.sendExecuteQuery(node, request, listener);
+        searchTransportService.sendExecuteQuery(node, request, listener);
     }

     @Override
@@ -91,7 +91,7 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<QuerySea
     void executeFetch(final int shardIndex, final SearchShardTarget shardTarget, final AtomicInteger counter,
                       final ShardFetchSearchRequest fetchSearchRequest, DiscoveryNode node) {
-        searchService.sendExecuteFetch(node, fetchSearchRequest, new ActionListener<FetchSearchResult>() {
+        searchTransportService.sendExecuteFetch(node, fetchSearchRequest, new ActionListener<FetchSearchResult>() {
             @Override
             public void onResponse(FetchSearchResult result) {
                 result.shardTarget(shardTarget);

core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java

@@ -26,7 +26,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.util.concurrent.AtomicArray;
-import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.search.action.SearchTransportService;
 import org.elasticsearch.search.controller.SearchPhaseController;
 import org.elasticsearch.search.fetch.QueryFetchSearchResult;
 import org.elasticsearch.search.fetch.ScrollQueryFetchSearchResult;
@@ -42,7 +42,7 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {
     private final ESLogger logger;
     private final SearchPhaseController searchPhaseController;
-    private final SearchServiceTransportAction searchService;
+    private final SearchTransportService searchTransportService;
     private final SearchScrollRequest request;
     private final ActionListener<SearchResponse> listener;
     private final ParsedScrollId scrollId;
@@ -53,11 +53,11 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {
     private final AtomicInteger counter;

     SearchScrollQueryAndFetchAsyncAction(ESLogger logger, ClusterService clusterService,
-                                         SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController,
+                                         SearchTransportService searchTransportService, SearchPhaseController searchPhaseController,
                                          SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
         this.logger = logger;
         this.searchPhaseController = searchPhaseController;
-        this.searchService = searchService;
+        this.searchTransportService = searchTransportService;
         this.request = request;
         this.listener = listener;
         this.scrollId = scrollId;
@@ -128,7 +128,7 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {
     void executePhase(final int shardIndex, DiscoveryNode node, final long searchId) {
         InternalScrollSearchRequest internalRequest = internalScrollSearchRequest(searchId, request);
-        searchService.sendExecuteFetch(node, internalRequest, new ActionListener<ScrollQueryFetchSearchResult>() {
+        searchTransportService.sendExecuteFetch(node, internalRequest, new ActionListener<ScrollQueryFetchSearchResult>() {
             @Override
             public void onResponse(ScrollQueryFetchSearchResult result) {
                 queryFetchResults.set(shardIndex, result.result());

core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java

@@ -27,7 +27,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.util.concurrent.AtomicArray;
-import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.search.action.SearchTransportService;
 import org.elasticsearch.search.controller.SearchPhaseController;
 import org.elasticsearch.search.fetch.FetchSearchResult;
 import org.elasticsearch.search.fetch.ShardFetchRequest;
@@ -44,7 +44,7 @@ import static org.elasticsearch.action.search.TransportSearchHelper.internalScro
 class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {

     private final ESLogger logger;
-    private final SearchServiceTransportAction searchService;
+    private final SearchTransportService searchTransportService;
     private final SearchPhaseController searchPhaseController;
     private final SearchScrollRequest request;
     private final ActionListener<SearchResponse> listener;
@@ -57,10 +57,10 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
     private final AtomicInteger successfulOps;

     SearchScrollQueryThenFetchAsyncAction(ESLogger logger, ClusterService clusterService,
-                                          SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController,
+                                          SearchTransportService searchTransportService, SearchPhaseController searchPhaseController,
                                           SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
         this.logger = logger;
-        this.searchService = searchService;
+        this.searchTransportService = searchTransportService;
         this.searchPhaseController = searchPhaseController;
         this.request = request;
         this.listener = listener;
@@ -124,7 +124,7 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
     private void executeQueryPhase(final int shardIndex, final AtomicInteger counter, DiscoveryNode node, final long searchId) {
         InternalScrollSearchRequest internalRequest = internalScrollSearchRequest(searchId, request);
-        searchService.sendExecuteQuery(node, internalRequest, new ActionListener<ScrollQuerySearchResult>() {
+        searchTransportService.sendExecuteQuery(node, internalRequest, new ActionListener<ScrollQuerySearchResult>() {
             @Override
             public void onResponse(ScrollQuerySearchResult result) {
                 queryResults.set(shardIndex, result.queryResult());
@@ -182,7 +182,7 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
                 ScoreDoc lastEmittedDoc = lastEmittedDocPerShard[entry.index];
                 ShardFetchRequest shardFetchRequest = new ShardFetchRequest(querySearchResult.id(), docIds, lastEmittedDoc);
                 DiscoveryNode node = nodes.get(querySearchResult.shardTarget().nodeId());
-                searchService.sendExecuteFetchScroll(node, shardFetchRequest, new ActionListener<FetchSearchResult>() {
+                searchTransportService.sendExecuteFetchScroll(node, shardFetchRequest, new ActionListener<FetchSearchResult>() {
                     @Override
                     public void onResponse(FetchSearchResult result) {
                         result.shardTarget(querySearchResult.shardTarget());

core/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java

@@ -30,7 +30,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.CountDown;
-import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.search.action.SearchTransportService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportResponse;
 import org.elasticsearch.transport.TransportService;
@@ -47,15 +47,15 @@ import static org.elasticsearch.action.search.TransportSearchHelper.parseScrollI
 public class TransportClearScrollAction extends HandledTransportAction<ClearScrollRequest, ClearScrollResponse> {

     private final ClusterService clusterService;
-    private final SearchServiceTransportAction searchServiceTransportAction;
+    private final SearchTransportService searchTransportService;

     @Inject
     public TransportClearScrollAction(Settings settings, TransportService transportService, ThreadPool threadPool,
-                                      ClusterService clusterService, SearchServiceTransportAction searchServiceTransportAction,
+                                      ClusterService clusterService, SearchTransportService searchTransportService,
                                       ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
         super(settings, ClearScrollAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, ClearScrollRequest::new);
         this.clusterService = clusterService;
-        this.searchServiceTransportAction = searchServiceTransportAction;
+        this.searchTransportService = searchTransportService;
     }

     @Override
@@ -64,10 +64,8 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScro
     }

     private class Async {
-
         final DiscoveryNodes nodes;
         final CountDown expectedOps;
-        final ClearScrollRequest request;
         final List<ScrollIdForNode[]> contexts = new ArrayList<>();
         final ActionListener<ClearScrollResponse> listener;
         final AtomicReference<Throwable> expHolder;
@@ -85,8 +83,6 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScro
                     this.contexts.add(context);
                 }
             }
-
-            this.request = request;
             this.listener = listener;
             this.expHolder = new AtomicReference<>();
             this.expectedOps = new CountDown(expectedOps);
@@ -100,7 +96,7 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScro
         if (contexts.isEmpty()) {
             for (final DiscoveryNode node : nodes) {
-                searchServiceTransportAction.sendClearAllScrollContexts(node, request, new ActionListener<TransportResponse>() {
+                searchTransportService.sendClearAllScrollContexts(node, new ActionListener<TransportResponse>() {
                     @Override
                     public void onResponse(TransportResponse response) {
                         onFreedContext(true);
@@ -121,9 +117,9 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScro
                         continue;
                     }
-                    searchServiceTransportAction.sendFreeContext(node, target.getScrollId(), request, new ActionListener<SearchServiceTransportAction.SearchFreeContextResponse>() {
+                    searchTransportService.sendFreeContext(node, target.getScrollId(), new ActionListener<SearchTransportService.SearchFreeContextResponse>() {
                         @Override
-                        public void onResponse(SearchServiceTransportAction.SearchFreeContextResponse freed) {
+                        public void onResponse(SearchTransportService.SearchFreeContextResponse freed) {
                             onFreedContext(freed.isFreed());
                         }

core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java

@@ -29,7 +29,7 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.IndexNotFoundException;
 import org.elasticsearch.indices.IndexClosedException;
-import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.search.action.SearchTransportService;
 import org.elasticsearch.search.controller.SearchPhaseController;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
@@ -45,17 +45,17 @@ import static org.elasticsearch.action.search.SearchType.QUERY_AND_FETCH;
 public class TransportSearchAction extends HandledTransportAction<SearchRequest, SearchResponse> {

     private final ClusterService clusterService;
-    private final SearchServiceTransportAction searchService;
+    private final SearchTransportService searchTransportService;
     private final SearchPhaseController searchPhaseController;

     @Inject
     public TransportSearchAction(Settings settings, ThreadPool threadPool, SearchPhaseController searchPhaseController,
-                                 TransportService transportService, SearchServiceTransportAction searchService,
+                                 TransportService transportService, SearchTransportService searchTransportService,
                                  ClusterService clusterService, ActionFilters actionFilters, IndexNameExpressionResolver
                                  indexNameExpressionResolver) {
         super(settings, SearchAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, SearchRequest::new);
         this.searchPhaseController = searchPhaseController;
-        this.searchService = searchService;
+        this.searchTransportService = searchTransportService;
         this.clusterService = clusterService;
     }
@@ -81,19 +81,19 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
         AbstractSearchAsyncAction searchAsyncAction;
         switch(searchRequest.searchType()) {
             case DFS_QUERY_THEN_FETCH:
-                searchAsyncAction = new SearchDfsQueryThenFetchAsyncAction(logger, searchService, clusterService,
+                searchAsyncAction = new SearchDfsQueryThenFetchAsyncAction(logger, searchTransportService, clusterService,
                         indexNameExpressionResolver, searchPhaseController, threadPool, searchRequest, listener);
                 break;
             case QUERY_THEN_FETCH:
-                searchAsyncAction = new SearchQueryThenFetchAsyncAction(logger, searchService, clusterService,
+                searchAsyncAction = new SearchQueryThenFetchAsyncAction(logger, searchTransportService, clusterService,
                         indexNameExpressionResolver, searchPhaseController, threadPool, searchRequest, listener);
                 break;
             case DFS_QUERY_AND_FETCH:
-                searchAsyncAction = new SearchDfsQueryAndFetchAsyncAction(logger, searchService, clusterService,
+                searchAsyncAction = new SearchDfsQueryAndFetchAsyncAction(logger, searchTransportService, clusterService,
                         indexNameExpressionResolver, searchPhaseController, threadPool, searchRequest, listener);
                 break;
             case QUERY_AND_FETCH:
-                searchAsyncAction = new SearchQueryAndFetchAsyncAction(logger, searchService, clusterService,
+                searchAsyncAction = new SearchQueryAndFetchAsyncAction(logger, searchTransportService, clusterService,
                         indexNameExpressionResolver, searchPhaseController, threadPool, searchRequest, listener);
                 break;
             default:

core/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java

@@ -26,7 +26,7 @@ import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.search.action.SearchTransportService;
 import org.elasticsearch.search.controller.SearchPhaseController;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
@@ -41,18 +41,18 @@ import static org.elasticsearch.action.search.TransportSearchHelper.parseScrollI
 public class TransportSearchScrollAction extends HandledTransportAction<SearchScrollRequest, SearchResponse> {

     private final ClusterService clusterService;
-    private final SearchServiceTransportAction searchService;
+    private final SearchTransportService searchTransportService;
     private final SearchPhaseController searchPhaseController;

     @Inject
     public TransportSearchScrollAction(Settings settings, ThreadPool threadPool, TransportService transportService,
-                                       ClusterService clusterService, SearchServiceTransportAction searchService,
+                                       ClusterService clusterService, SearchTransportService searchTransportService,
                                        SearchPhaseController searchPhaseController,
                                        ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
         super(settings, SearchScrollAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
                 SearchScrollRequest::new);
         this.clusterService = clusterService;
-        this.searchService = searchService;
+        this.searchTransportService = searchTransportService;
         this.searchPhaseController = searchPhaseController;
     }
@@ -63,11 +63,11 @@ public class TransportSearchScrollAction extends HandledTransportAction<SearchSc
         AbstractAsyncAction action;
         switch (scrollId.getType()) {
             case QUERY_THEN_FETCH_TYPE:
-                action = new SearchScrollQueryThenFetchAsyncAction(logger, clusterService, searchService,
+                action = new SearchScrollQueryThenFetchAsyncAction(logger, clusterService, searchTransportService,
                         searchPhaseController, request, scrollId, listener);
                 break;
             case QUERY_AND_FETCH_TYPE:
-                action = new SearchScrollQueryAndFetchAsyncAction(logger, clusterService, searchService,
+                action = new SearchScrollQueryAndFetchAsyncAction(logger, clusterService, searchTransportService,
                         searchPhaseController, request, scrollId, listener);
                 break;
             default:

core/src/main/java/org/elasticsearch/action/support/TransportAction.java

@@ -30,6 +30,7 @@ import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.tasks.Task;
+import org.elasticsearch.tasks.TaskListener;
 import org.elasticsearch.tasks.TaskManager;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -72,6 +73,13 @@ public abstract class TransportAction<Request extends ActionRequest<Request>, Re
      * This is a typical behavior.
      */
     public final Task execute(Request request, ActionListener<Response> listener) {
+        /*
+         * While this version of execute could delegate to the TaskListener
+         * version of execute that'd add yet another layer of wrapping on the
+         * listener and prevent us from using the listener bare if there isn't a
+         * task. That just seems like too many objects. Thus the two versions of
+         * this method.
+         */
         Task task = taskManager.register("transport", actionName, request);
         if (task == null) {
             execute(null, request, listener);
@@ -93,11 +101,32 @@ public abstract class TransportAction<Request extends ActionRequest<Request>, Re
         return task;
     }

+    public final Task execute(Request request, TaskListener<Response> listener) {
+        Task task = taskManager.register("transport", actionName, request);
+        execute(task, request, new ActionListener<Response>() {
+            @Override
+            public void onResponse(Response response) {
+                if (task != null) {
+                    taskManager.unregister(task);
+                }
+                listener.onResponse(task, response);
+            }
+
+            @Override
+            public void onFailure(Throwable e) {
+                if (task != null) {
+                    taskManager.unregister(task);
+                }
+                listener.onFailure(task, e);
+            }
+        });
+        return task;
+    }
+
     /**
      * Use this method when the transport action should continue to run in the context of the current task
      */
     public final void execute(Task task, Request request, ActionListener<Response> listener) {

         ActionRequestValidationException validationException = request.validate();
         if (validationException != null) {
             listener.onFailure(validationException);
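The TaskListener-based execute always registers a task and hands it back on both completion paths, so callers can correlate the result with the task they spawned. A hedged caller sketch (illustrative; transportSearchAction, searchRequest, and logger are assumed to exist in the caller):

    Task task = transportSearchAction.execute(searchRequest, new TaskListener<SearchResponse>() {
        @Override
        public void onResponse(Task task, SearchResponse response) {
            // The task is unregistered by the time either callback fires.
            logger.info("task [{}] finished", task.getId());
        }

        @Override
        public void onFailure(Task task, Throwable e) {
            logger.warn("task [{}] failed", e, task.getId());
        }
    });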

core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java

@@ -25,12 +25,12 @@ import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNodes;

 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
+import java.util.Objects;

 /**
- *
+ * An event received by the local node, signaling that the cluster state has changed.
  */
 public class ClusterChangedEvent {
@@ -43,6 +43,9 @@ public class ClusterChangedEvent {
     private final DiscoveryNodes.Delta nodesDelta;

     public ClusterChangedEvent(String source, ClusterState state, ClusterState previousState) {
+        Objects.requireNonNull(source, "source must not be null");
+        Objects.requireNonNull(state, "state must not be null");
+        Objects.requireNonNull(previousState, "previousState must not be null");
         this.source = source;
         this.state = state;
         this.previousState = previousState;
@@ -56,19 +59,35 @@ public class ClusterChangedEvent {
         return this.source;
     }

+    /**
+     * The new cluster state that caused this change event.
+     */
     public ClusterState state() {
         return this.state;
     }

+    /**
+     * The previous cluster state for this change event.
+     */
     public ClusterState previousState() {
         return this.previousState;
     }

+    /**
+     * Returns <code>true</code> iff the routing tables (for all indices) have
+     * changed between the previous cluster state and the current cluster state.
+     * Note that this is an object reference equality test, not an equals test.
+     */
     public boolean routingTableChanged() {
         return state.routingTable() != previousState.routingTable();
     }

+    /**
+     * Returns <code>true</code> iff the routing table has changed for the given index.
+     * Note that this is an object reference equality test, not an equals test.
+     */
     public boolean indexRoutingTableChanged(String index) {
+        Objects.requireNonNull(index, "index must not be null");
         if (!state.routingTable().hasIndex(index) && !previousState.routingTable().hasIndex(index)) {
             return false;
         }
@@ -82,9 +101,6 @@ public class ClusterChangedEvent {
      * Returns the indices created in this event
      */
     public List<String> indicesCreated() {
-        if (previousState == null) {
-            return Arrays.asList(state.metaData().indices().keys().toArray(String.class));
-        }
         if (!metaDataChanged()) {
             return Collections.emptyList();
         }
@@ -105,20 +121,14 @@ public class ClusterChangedEvent {
      * Returns the indices deleted in this event
      */
     public List<String> indicesDeleted() {
-        // if the new cluster state has a new master then we cannot know if an index which is not in the cluster state
-        // is actually supposed to be deleted or imported as dangling instead. for example a new master might not have
-        // the index in its cluster state because it was started with an empty data folder and in this case we want to
-        // import as dangling. we check here for new master too to be on the safe side in this case.
-        // This means that under certain conditions deleted indices might be reimported if a master fails while the deletion
-        // request is issued and a node receives the cluster state that would trigger the deletion from the new master.
-        // See test MetaDataWriteDataNodesTests.testIndicesDeleted()
+        // If the new cluster state has a new cluster UUID, the likely scenario is that a node was elected
+        // master that has had its data directory wiped out, in which case we don't want to delete the indices and lose data;
+        // rather we want to import them as dangling indices instead. So we check here if the cluster UUID differs from the previous
+        // cluster UUID, in which case, we don't want to delete indices that the master erroneously believes shouldn't exist.
+        // See test DiscoveryWithServiceDisruptionsIT.testIndicesDeleted()
         // See discussion on https://github.com/elastic/elasticsearch/pull/9952 and
         // https://github.com/elastic/elasticsearch/issues/11665
-        if (hasNewMaster() || previousState == null) {
-            return Collections.emptyList();
-        }
-        if (!metaDataChanged()) {
+        if (metaDataChanged() == false || isNewCluster()) {
             return Collections.emptyList();
         }
         List<String> deleted = null;
@@ -134,10 +144,20 @@ public class ClusterChangedEvent {
         return deleted == null ? Collections.<String>emptyList() : deleted;
     }

+    /**
+     * Returns <code>true</code> iff the metadata for the cluster has changed between
+     * the previous cluster state and the new cluster state. Note that this is an object
+     * reference equality test, not an equals test.
+     */
     public boolean metaDataChanged() {
         return state.metaData() != previousState.metaData();
     }

+    /**
+     * Returns <code>true</code> iff the {@link IndexMetaData} for a given index
+     * has changed between the previous cluster state and the new cluster state.
+     * Note that this is an object reference equality test, not an equals test.
+     */
     public boolean indexMetaDataChanged(IndexMetaData current) {
         MetaData previousMetaData = previousState.metaData();
         if (previousMetaData == null) {
@ -152,46 +172,56 @@ public class ClusterChangedEvent {
return true; return true;
} }
/**
* Returns <code>true</code> iff the cluster level blocks have changed between cluster states.
* Note that this is an object reference equality test, not an equals test.
*/
public boolean blocksChanged() { public boolean blocksChanged() {
return state.blocks() != previousState.blocks(); return state.blocks() != previousState.blocks();
} }
/**
* Returns <code>true</code> iff the local node is the mater node of the cluster.
*/
public boolean localNodeMaster() { public boolean localNodeMaster() {
return state.nodes().localNodeMaster(); return state.nodes().localNodeMaster();
} }
/**
* Returns the {@link org.elasticsearch.cluster.node.DiscoveryNodes.Delta} between
* the previous cluster state and the new cluster state.
*/
public DiscoveryNodes.Delta nodesDelta() { public DiscoveryNodes.Delta nodesDelta() {
return this.nodesDelta; return this.nodesDelta;
} }
/**
* Returns <code>true</code> iff nodes have been removed from the cluster since the last cluster state.
*/
public boolean nodesRemoved() { public boolean nodesRemoved() {
return nodesDelta.removed(); return nodesDelta.removed();
} }
/**
* Returns <code>true</code> iff nodes have been added from the cluster since the last cluster state.
*/
public boolean nodesAdded() { public boolean nodesAdded() {
return nodesDelta.added(); return nodesDelta.added();
} }
/**
* Returns <code>true</code> iff nodes have been changed (added or removed) from the cluster since the last cluster state.
*/
public boolean nodesChanged() { public boolean nodesChanged() {
return nodesRemoved() || nodesAdded(); return nodesRemoved() || nodesAdded();
} }
-    /**
-     * Checks if this cluster state comes from a different master than the previous one.
-     * This is a workaround for the scenario where a node misses a cluster state that has either
-     * no master block or state not recovered flag set. In this case we must make sure that
-     * if an index is missing from the cluster state is not deleted immediately but instead imported
-     * as dangling. See discussion on https://github.com/elastic/elasticsearch/pull/9952
-     */
-    private boolean hasNewMaster() {
-        String oldMaster = previousState().getNodes().masterNodeId();
-        String newMaster = state().getNodes().masterNodeId();
-        if (oldMaster == null && newMaster == null) {
-            return false;
-        }
-        if (oldMaster == null && newMaster != null) {
-            return true;
-        }
-        return oldMaster.equals(newMaster) == false;
+    // Determines whether or not the current cluster state represents an entirely
+    // different cluster from the previous cluster state, which will happen when a
+    // master node is elected that has never been part of the cluster before.
+    private boolean isNewCluster() {
+        final String prevClusterUUID = previousState.metaData().clusterUUID();
+        final String currClusterUUID = state.metaData().clusterUUID();
+        return prevClusterUUID.equals(currClusterUUID) == false;
     }
 }
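The move from hasNewMaster() to isNewCluster() is easier to see in isolation: a routine master failover keeps the cluster UUID, while a newly elected master whose data directory was wiped generates a fresh one, and only the latter should suppress index deletion. A minimal sketch of that distinction follows; the names are hypothetical and nothing here is Elasticsearch API.

import java.util.Objects;

final class ClusterUuidSketch {
    // Stand-in for comparing state.metaData().clusterUUID() across two cluster states.
    static boolean isNewCluster(String previousClusterUUID, String currentClusterUUID) {
        return Objects.equals(previousClusterUUID, currentClusterUUID) == false;
    }

    public static void main(String[] args) {
        // Routine master failover: the UUID survives, deletions may proceed.
        System.out.println(isNewCluster("u-1", "u-1")); // false
        // Master elected with a wiped data directory: new UUID, import as dangling instead.
        System.out.println(isNewCluster("u-1", "u-2")); // true
    }
}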
@@ -46,6 +46,11 @@ import static org.elasticsearch.common.transport.TransportAddressSerializers.add
  */
 public class DiscoveryNode implements Streamable, ToXContent {

+    public static final String DATA_ATTR = "data";
+    public static final String MASTER_ATTR = "master";
+    public static final String CLIENT_ATTR = "client";
+    public static final String INGEST_ATTR = "ingest";
+
     public static boolean localNode(Settings settings) {
         if (Node.NODE_LOCAL_SETTING.exists(settings)) {
             return Node.NODE_LOCAL_SETTING.get(settings);
@@ -274,7 +279,7 @@ public class DiscoveryNode implements Streamable, ToXContent {
      * Should this node hold data (shards) or not.
      */
     public boolean dataNode() {
-        String data = attributes.get("data");
+        String data = attributes.get(DATA_ATTR);
         if (data == null) {
             return !clientNode();
         }
@@ -292,7 +297,7 @@ public class DiscoveryNode implements Streamable, ToXContent {
      * Is the node a client node or not.
      */
     public boolean clientNode() {
-        String client = attributes.get("client");
+        String client = attributes.get(CLIENT_ATTR);
         return client != null && Booleans.parseBooleanExact(client);
     }
@@ -304,7 +309,7 @@ public class DiscoveryNode implements Streamable, ToXContent {
      * Can this node become master or not.
      */
     public boolean masterNode() {
-        String master = attributes.get("master");
+        String master = attributes.get(MASTER_ATTR);
         if (master == null) {
             return !clientNode();
         }
@@ -322,7 +327,7 @@ public class DiscoveryNode implements Streamable, ToXContent {
      * Returns a boolean that tells whether this an ingest node or not
      */
     public boolean isIngestNode() {
-        String ingest = attributes.get("ingest");
+        String ingest = attributes.get(INGEST_ATTR);
         return ingest == null ? true : Booleans.parseBooleanExact(ingest);
     }
@@ -219,6 +219,9 @@ public final class ClusterSettings extends AbstractScopedSettings {
     HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED,
     HttpTransportSettings.SETTING_PIPELINING,
     HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN,
+    HttpTransportSettings.SETTING_HTTP_HOST,
+    HttpTransportSettings.SETTING_HTTP_PUBLISH_HOST,
+    HttpTransportSettings.SETTING_HTTP_BIND_HOST,
     HttpTransportSettings.SETTING_HTTP_PORT,
     HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT,
     HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS,
@@ -47,7 +47,7 @@ public final class HttpTransportSettings {
     public static final Setting<List<String>> SETTING_HTTP_BIND_HOST = listSetting("http.bind_host", SETTING_HTTP_HOST, s -> s, false, Scope.CLUSTER);
     public static final Setting<PortsRange> SETTING_HTTP_PORT = new Setting<PortsRange>("http.port", "9200-9300", PortsRange::new, false, Scope.CLUSTER);
-    public static final Setting<Integer> SETTING_HTTP_PUBLISH_PORT = Setting.intSetting("http.publish_port", 0, 0, false, Scope.CLUSTER);
+    public static final Setting<Integer> SETTING_HTTP_PUBLISH_PORT = Setting.intSetting("http.publish_port", -1, -1, false, Scope.CLUSTER);
     public static final Setting<Boolean> SETTING_HTTP_DETAILED_ERRORS_ENABLED = Setting.boolSetting("http.detailed_errors.enabled", true, false, Scope.CLUSTER);
     public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_CONTENT_LENGTH = Setting.byteSizeSetting("http.max_content_length", new ByteSizeValue(100, ByteSizeUnit.MB), false, Scope.CLUSTER);
     public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_CHUNK_SIZE = Setting.byteSizeSetting("http.max_chunk_size", new ByteSizeValue(8, ByteSizeUnit.KB), false, Scope.CLUSTER);
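The http.publish_port default moves from 0 to -1, giving "not configured" an unambiguous sentinel that the auto-resolution below can distinguish from a real port value. A rough sketch of the typed-setting pattern with a default and a floor follows; this is a hypothetical stand-in, not the real Setting class.

import java.util.Map;

final class IntSettingSketch {
    final String key;
    final int defaultValue;
    final int minValue;

    IntSettingSketch(String key, int defaultValue, int minValue) {
        this.key = key;
        this.defaultValue = defaultValue;
        this.minValue = minValue;
    }

    int get(Map<String, String> settings) {
        int value = Integer.parseInt(settings.getOrDefault(key, Integer.toString(defaultValue)));
        if (value < minValue) {
            throw new IllegalArgumentException(key + " must be >= " + minValue + " but was " + value);
        }
        return value;
    }

    public static void main(String[] args) {
        IntSettingSketch publishPort = new IntSettingSketch("http.publish_port", -1, -1);
        System.out.println(publishPort.get(Map.of()));                              // -1: auto-resolve
        System.out.println(publishPort.get(Map.of("http.publish_port", "9200")));   // explicit port
    }
}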
@@ -19,6 +19,8 @@
 package org.elasticsearch.http.netty;

+import com.carrotsearch.hppc.IntHashSet;
+import com.carrotsearch.hppc.IntSet;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.component.AbstractLifecycleComponent;
 import org.elasticsearch.common.inject.Inject;
@@ -192,8 +194,6 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
     protected final boolean detailedErrorsEnabled;
     protected final ThreadPool threadPool;

-    protected int publishPort;
-
     protected final boolean tcpNoDelay;
     protected final boolean tcpKeepAlive;
     protected final boolean reuseAddress;
@@ -237,7 +237,6 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
         this.port = SETTING_HTTP_PORT.get(settings);
         this.bindHosts = SETTING_HTTP_BIND_HOST.get(settings).toArray(Strings.EMPTY_ARRAY);
         this.publishHosts = SETTING_HTTP_PUBLISH_HOST.get(settings).toArray(Strings.EMPTY_ARRAY);
-        this.publishPort = SETTING_HTTP_PUBLISH_PORT.get(settings);
         this.tcpNoDelay = SETTING_HTTP_TCP_NO_DELAY.get(settings);
         this.tcpKeepAlive = SETTING_HTTP_TCP_KEEP_ALIVE.get(settings);
         this.reuseAddress = SETTING_HTTP_TCP_REUSE_ADDRESS.get(settings);
@@ -312,7 +311,10 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
         serverBootstrap.setOption("child.receiveBufferSizePredictorFactory", receiveBufferSizePredictorFactory);
         serverBootstrap.setOption("reuseAddress", reuseAddress);
         serverBootstrap.setOption("child.reuseAddress", reuseAddress);
+        this.boundAddress = createBoundHttpAddress();
+    }
+
+    private BoundTransportAddress createBoundHttpAddress() {
         // Bind and start to accept incoming connections.
         InetAddress hostAddresses[];
         try {
@@ -333,7 +335,16 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
             throw new BindTransportException("Failed to resolve publish address", e);
         }

-        if (0 == publishPort) {
+        final int publishPort = resolvePublishPort(settings, boundAddresses, publishInetAddress);
+        final InetSocketAddress publishAddress = new InetSocketAddress(publishInetAddress, publishPort);
+        return new BoundTransportAddress(boundAddresses.toArray(new TransportAddress[boundAddresses.size()]), new InetSocketTransportAddress(publishAddress));
+    }
+
+    // package private for tests
+    static int resolvePublishPort(Settings settings, List<InetSocketTransportAddress> boundAddresses, InetAddress publishInetAddress) {
+        int publishPort = SETTING_HTTP_PUBLISH_PORT.get(settings);
+
+        if (publishPort < 0) {
             for (InetSocketTransportAddress boundAddress : boundAddresses) {
                 InetAddress boundInetAddress = boundAddress.address().getAddress();
                 if (boundInetAddress.isAnyLocalAddress() || boundInetAddress.equals(publishInetAddress)) {
@@ -343,13 +354,23 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
             }
         }

-        if (0 == publishPort) {
-            throw new BindHttpException("Publish address [" + publishInetAddress + "] does not match any of the bound addresses [" + boundAddresses + "]");
+        // if no matching boundAddress found, check if there is a unique port for all bound addresses
+        if (publishPort < 0) {
+            final IntSet ports = new IntHashSet();
+            for (InetSocketTransportAddress boundAddress : boundAddresses) {
+                ports.add(boundAddress.getPort());
+            }
+            if (ports.size() == 1) {
+                publishPort = ports.iterator().next().value;
+            }
         }

-        final InetSocketAddress publishAddress = new InetSocketAddress(publishInetAddress, publishPort);
-        this.boundAddress = new BoundTransportAddress(boundAddresses.toArray(new TransportAddress[boundAddresses.size()]), new InetSocketTransportAddress(publishAddress));
+        if (publishPort < 0) {
+            throw new BindHttpException("Failed to auto-resolve http publish port, multiple bound addresses " + boundAddresses +
+                " with distinct ports and none of them matched the publish address (" + publishInetAddress + "). " +
+                "Please specify a unique port by setting " + SETTING_HTTP_PORT.getKey() + " or " + SETTING_HTTP_PUBLISH_PORT.getKey());
+        }
+        return publishPort;
     }

     private CorsConfig buildCorsConfig(Settings settings) {
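The resolution order in resolvePublishPort is: an explicitly configured http.publish_port wins; otherwise use the port of a bound address that matches the publish address (or a wildcard bind); otherwise fall back to a port shared by all bound addresses; otherwise fail. A standalone sketch of that cascade follows, simplified to string host matching and plain JDK collections rather than the Elasticsearch transport types.

import java.net.InetSocketAddress;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

final class PublishPortSketch {
    static int resolvePublishPort(int configuredPort, List<InetSocketAddress> bound, String publishHost) {
        if (configuredPort >= 0) {
            return configuredPort; // explicitly configured, e.g. via http.publish_port
        }
        for (InetSocketAddress address : bound) {
            if (address.getHostString().equals(publishHost)) {
                return address.getPort(); // a bound address matches the publish address
            }
        }
        Set<Integer> ports = new HashSet<>();
        for (InetSocketAddress address : bound) {
            ports.add(address.getPort());
        }
        if (ports.size() == 1) {
            return ports.iterator().next(); // all bound addresses agree on one port
        }
        throw new IllegalStateException("cannot auto-resolve publish port, bound ports differ: " + ports);
    }

    public static void main(String[] args) {
        List<InetSocketAddress> bound = List.of(
                new InetSocketAddress("127.0.0.1", 9200),
                new InetSocketAddress("192.168.0.1", 9200));
        // -1 mirrors the new default of http.publish_port: auto-resolve.
        System.out.println(resolvePublishPort(-1, bound, "10.0.0.1")); // 9200: the unique bound port
    }
}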
@@ -19,7 +19,8 @@
 package org.elasticsearch.ingest;

-import org.elasticsearch.cluster.AbstractDiffable;
+import org.elasticsearch.cluster.Diff;
+import org.elasticsearch.cluster.DiffableUtils;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -39,7 +40,7 @@ import java.util.Map;
 /**
  * Holds the ingest pipelines that are available in the cluster
  */
-public final class IngestMetadata extends AbstractDiffable<MetaData.Custom> implements MetaData.Custom {
+public final class IngestMetadata implements MetaData.Custom {

     public final static String TYPE = "ingest";
     public final static IngestMetadata PROTO = new IngestMetadata();
@@ -50,7 +51,6 @@ public final class IngestMetadata extends AbstractDiffable<MetaData.Custom> impl
         INGEST_METADATA_PARSER.declareObjectArray(List::addAll , PipelineConfiguration.getParser(), PIPELINES_FIELD);
     }

-
     // We can't use Pipeline class directly in cluster state, because we don't have the processor factories around when
     // IngestMetadata is registered as custom metadata.
     private final Map<String, PipelineConfiguration> pipelines;
@@ -73,7 +73,7 @@ public final class IngestMetadata extends AbstractDiffable<MetaData.Custom> impl
     }

     @Override
-    public MetaData.Custom readFrom(StreamInput in) throws IOException {
+    public IngestMetadata readFrom(StreamInput in) throws IOException {
         int size = in.readVInt();
         Map<String, PipelineConfiguration> pipelines = new HashMap<>(size);
         for (int i = 0; i < size; i++) {
@@ -92,7 +92,7 @@ public final class IngestMetadata extends AbstractDiffable<MetaData.Custom> impl
     }

     @Override
-    public MetaData.Custom fromXContent(XContentParser parser) throws IOException {
+    public IngestMetadata fromXContent(XContentParser parser) throws IOException {
         Map<String, PipelineConfiguration> pipelines = new HashMap<>();
         List<PipelineConfiguration> configs = INGEST_METADATA_PARSER.parse(parser);
         for (PipelineConfiguration pipeline : configs) {
@@ -116,4 +116,52 @@ public final class IngestMetadata extends AbstractDiffable<MetaData.Custom> impl
         return MetaData.API_AND_GATEWAY;
     }

+    @Override
+    public Diff<MetaData.Custom> diff(MetaData.Custom before) {
+        return new IngestMetadataDiff((IngestMetadata) before, this);
+    }
+
+    @Override
+    public Diff<MetaData.Custom> readDiffFrom(StreamInput in) throws IOException {
+        return new IngestMetadataDiff(in);
+    }
+
+    static class IngestMetadataDiff implements Diff<MetaData.Custom> {
+
+        final Diff<Map<String, PipelineConfiguration>> pipelines;
+
+        IngestMetadataDiff(IngestMetadata before, IngestMetadata after) {
+            this.pipelines = DiffableUtils.diff(before.pipelines, after.pipelines, DiffableUtils.getStringKeySerializer());
+        }
+
+        public IngestMetadataDiff(StreamInput in) throws IOException {
+            pipelines = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), PipelineConfiguration.PROTOTYPE);
+        }
+
+        @Override
+        public MetaData.Custom apply(MetaData.Custom part) {
+            return new IngestMetadata(pipelines.apply(((IngestMetadata) part).pipelines));
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            pipelines.writeTo(out);
+        }
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+
+        IngestMetadata that = (IngestMetadata) o;
+
+        return pipelines.equals(that.pipelines);
+    }
+
+    @Override
+    public int hashCode() {
+        return pipelines.hashCode();
+    }
 }
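IngestMetadata now ships real diffs instead of whole-object replacements, so only changed pipelines travel in cluster state updates. The contract is that apply(before) must reproduce the "after" state exactly. A toy round trip over a string map follows, with hypothetical types and plain Java, to illustrate that contract; the real IngestMetadataDiff delegates the same work to DiffableUtils.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

final class MapDiffSketch {

    // Captures keys that were added or changed, and keys that were removed.
    record Delta(Map<String, String> upserts, Set<String> deletes) {

        static Delta diff(Map<String, String> before, Map<String, String> after) {
            Set<String> deletes = new HashSet<>(before.keySet());
            deletes.removeAll(after.keySet());
            Map<String, String> upserts = new HashMap<>();
            after.forEach((id, config) -> {
                if (!config.equals(before.get(id))) {
                    upserts.put(id, config);
                }
            });
            return new Delta(upserts, deletes);
        }

        // The contract an apply() implementation must honor: rebuild "after" from "before".
        Map<String, String> apply(Map<String, String> before) {
            Map<String, String> result = new HashMap<>(before);
            deletes.forEach(result::remove);
            result.putAll(upserts);
            return result;
        }
    }

    public static void main(String[] args) {
        Map<String, String> before = Map.of("pipeline-a", "config-v1", "pipeline-b", "config-v1");
        Map<String, String> after = Map.of("pipeline-a", "config-v2", "pipeline-c", "config-v1");
        Delta delta = Delta.diff(before, after);
        System.out.println(delta.apply(before).equals(after)); // true: only the delta is shipped
    }
}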
@@ -19,6 +19,7 @@
 package org.elasticsearch.ingest;

+import org.elasticsearch.cluster.AbstractDiffable;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -37,9 +38,10 @@ import java.util.function.BiFunction;
 /**
  * Encapsulates a pipeline's id and configuration as a blob
  */
-public final class PipelineConfiguration implements Writeable<PipelineConfiguration>, ToXContent {
+public final class PipelineConfiguration extends AbstractDiffable<PipelineConfiguration>
+        implements Writeable<PipelineConfiguration>, ToXContent {

-    private final static PipelineConfiguration PROTOTYPE = new PipelineConfiguration(null, null);
+    final static PipelineConfiguration PROTOTYPE = new PipelineConfiguration(null, null);

     public static PipelineConfiguration readPipelineConfiguration(StreamInput in) throws IOException {
         return PROTOTYPE.readFrom(in);
@@ -113,4 +115,22 @@ public final class PipelineConfiguration implements Writeable<PipelineConfigurat
         out.writeBytesReference(config);
     }

+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+
+        PipelineConfiguration that = (PipelineConfiguration) o;
+
+        if (!id.equals(that.id)) return false;
+        return config.equals(that.config);
+    }
+
+    @Override
+    public int hashCode() {
+        int result = id.hashCode();
+        result = 31 * result + config.hashCode();
+        return result;
+    }
 }
@@ -392,6 +392,7 @@ public class Node implements Closeable {
         injector.getInstance(IndicesTTLService.class).stop();
         injector.getInstance(RoutingService.class).stop();
         injector.getInstance(ClusterService.class).stop();
+        injector.getInstance(Discovery.class).stop();
         injector.getInstance(MonitorService.class).stop();
         injector.getInstance(GatewayService.class).stop();
         injector.getInstance(SearchService.class).stop();
@@ -118,7 +118,7 @@ public class RestNodesAction extends AbstractCatAction {
         table.addCell("pid", "default:false;alias:p;desc:process id");
         table.addCell("ip", "alias:i;desc:ip address");
         table.addCell("port", "default:false;alias:po;desc:bound transport port");
-        table.addCell("http_address", "default:false;alias:http;desc:bound http adress");
+        table.addCell("http_address", "default:false;alias:http;desc:bound http address");
         table.addCell("version", "default:false;alias:v;desc:es version");
         table.addCell("build", "default:false;alias:b;desc:es build hash");
@@ -249,7 +249,7 @@ public class RestNodesAction extends AbstractCatAction {
         } else {
             table.addCell("-");
         }
-        final Map<String, String> serviceAttributes = info.getServiceAttributes();
+        final Map<String, String> serviceAttributes = info == null ? null : info.getServiceAttributes();
         if (serviceAttributes != null) {
             table.addCell(serviceAttributes.getOrDefault("http_address", "-"));
         } else {
@@ -88,21 +88,35 @@ public class RestSearchAction extends BaseRestHandler {

     @Override
     public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws IOException {
-        SearchRequest searchRequest;
-        searchRequest = RestSearchAction.parseSearchRequest(queryRegistry, request, parseFieldMatcher, aggParsers);
+        SearchRequest searchRequest = new SearchRequest();
+        RestSearchAction.parseSearchRequest(searchRequest, queryRegistry, request, parseFieldMatcher, aggParsers, null);
         client.search(searchRequest, new RestStatusToXContentListener<>(channel));
     }

-    public static SearchRequest parseSearchRequest(IndicesQueriesRegistry indicesQueriesRegistry, RestRequest request,
-            ParseFieldMatcher parseFieldMatcher, AggregatorParsers aggParsers) throws IOException {
-        String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
-        SearchRequest searchRequest = new SearchRequest(indices);
+    /**
+     * Parses the rest request on top of the SearchRequest, preserving values
+     * that are not overridden by the rest request.
+     *
+     * @param restContent
+     *            override body content to use for the request. If null body
+     *            content is read from the request using
+     *            RestAction.hasBodyContent.
+     */
+    public static void parseSearchRequest(SearchRequest searchRequest, IndicesQueriesRegistry indicesQueriesRegistry, RestRequest request,
+            ParseFieldMatcher parseFieldMatcher, AggregatorParsers aggParsers, BytesReference restContent) throws IOException {
+        if (searchRequest.source() == null) {
+            searchRequest.source(new SearchSourceBuilder());
+        }
+        searchRequest.indices(Strings.splitStringByCommaToArray(request.param("index")));
         // get the content, and put it in the body
         // add content/source as template if template flag is set
         boolean isTemplateRequest = request.path().endsWith("/template");
-        final SearchSourceBuilder builder;
-        if (RestActions.hasBodyContent(request)) {
-            BytesReference restContent = RestActions.getRestContent(request);
+        if (restContent == null) {
+            if (RestActions.hasBodyContent(request)) {
+                restContent = RestActions.getRestContent(request);
+            }
+        }
+        if (restContent != null) {
             QueryParseContext context = new QueryParseContext(indicesQueriesRegistry);
             if (isTemplateRequest) {
                 try (XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) {
@@ -111,12 +125,10 @@ public class RestSearchAction extends BaseRestHandler {
                     Template template = TemplateQueryParser.parse(parser, context.parseFieldMatcher(), "params", "template");
                     searchRequest.template(template);
                 }
-                builder = null;
             } else {
-                builder = RestActions.getRestSearchSource(restContent, indicesQueriesRegistry, parseFieldMatcher, aggParsers);
+                RestActions.parseRestSearchSource(searchRequest.source(), restContent, indicesQueriesRegistry, parseFieldMatcher,
+                        aggParsers);
             }
-        } else {
-            builder = null;
         }

         // do not allow 'query_and_fetch' or 'dfs_query_and_fetch' search types
@@ -129,15 +141,7 @@ public class RestSearchAction extends BaseRestHandler {
         } else {
             searchRequest.searchType(searchType);
         }
-        if (builder == null) {
-            SearchSourceBuilder extraBuilder = new SearchSourceBuilder();
-            if (parseSearchSource(extraBuilder, request)) {
-                searchRequest.source(extraBuilder);
-            }
-        } else {
-            parseSearchSource(builder, request);
-            searchRequest.source(builder);
-        }
+        parseSearchSource(searchRequest.source(), request);
         searchRequest.requestCache(request.paramAsBoolean("request_cache", null));

         String scroll = request.param("scroll");
@@ -149,41 +153,35 @@ public class RestSearchAction extends BaseRestHandler {
         searchRequest.routing(request.param("routing"));
         searchRequest.preference(request.param("preference"));
         searchRequest.indicesOptions(IndicesOptions.fromRequest(request, searchRequest.indicesOptions()));
-
-        return searchRequest;
     }
-    private static boolean parseSearchSource(final SearchSourceBuilder searchSourceBuilder, RestRequest request) {
-
-        boolean modified = false;
+    /**
+     * Parses the rest request on top of the SearchSourceBuilder, preserving
+     * values that are not overridden by the rest request.
+     */
+    private static void parseSearchSource(final SearchSourceBuilder searchSourceBuilder, RestRequest request) {
         QueryBuilder<?> queryBuilder = RestActions.urlParamsToQueryBuilder(request);
         if (queryBuilder != null) {
             searchSourceBuilder.query(queryBuilder);
-            modified = true;
         }

         int from = request.paramAsInt("from", -1);
         if (from != -1) {
             searchSourceBuilder.from(from);
-            modified = true;
         }
         int size = request.paramAsInt("size", -1);
         if (size != -1) {
             searchSourceBuilder.size(size);
-            modified = true;
         }

         if (request.hasParam("explain")) {
             searchSourceBuilder.explain(request.paramAsBoolean("explain", null));
-            modified = true;
         }
         if (request.hasParam("version")) {
             searchSourceBuilder.version(request.paramAsBoolean("version", null));
-            modified = true;
         }
         if (request.hasParam("timeout")) {
             searchSourceBuilder.timeout(request.paramAsTime("timeout", null));
-            modified = true;
         }
         if (request.hasParam("terminate_after")) {
             int terminateAfter = request.paramAsInt("terminate_after",
@@ -192,7 +190,6 @@ public class RestSearchAction extends BaseRestHandler {
                 throw new IllegalArgumentException("terminateAfter must be > 0");
             } else if (terminateAfter > 0) {
                 searchSourceBuilder.terminateAfter(terminateAfter);
-                modified = true;
             }
         }

@@ -200,13 +197,11 @@ public class RestSearchAction extends BaseRestHandler {
         if (sField != null) {
             if (!Strings.hasText(sField)) {
                 searchSourceBuilder.noFields();
-                modified = true;
             } else {
                 String[] sFields = Strings.splitStringByCommaToArray(sField);
                 if (sFields != null) {
                     for (String field : sFields) {
                         searchSourceBuilder.field(field);
-                        modified = true;
                     }
                 }
             }
@@ -218,7 +213,6 @@ public class RestSearchAction extends BaseRestHandler {
             if (sFields != null) {
                 for (String field : sFields) {
                     searchSourceBuilder.fieldDataField(field);
-                    modified = true;
                 }
             }
         }
@@ -226,12 +220,10 @@ public class RestSearchAction extends BaseRestHandler {
         FetchSourceContext fetchSourceContext = FetchSourceContext.parseFromRestRequest(request);
         if (fetchSourceContext != null) {
             searchSourceBuilder.fetchSource(fetchSourceContext);
-            modified = true;
         }

         if (request.hasParam("track_scores")) {
             searchSourceBuilder.trackScores(request.paramAsBoolean("track_scores", false));
-            modified = true;
         }

         String sSorts = request.param("sort");
@@ -244,14 +236,11 @@ public class RestSearchAction extends BaseRestHandler {
                 String reverse = sort.substring(delimiter + 1);
                 if ("asc".equals(reverse)) {
                     searchSourceBuilder.sort(sortField, SortOrder.ASC);
-                    modified = true;
                 } else if ("desc".equals(reverse)) {
                     searchSourceBuilder.sort(sortField, SortOrder.DESC);
-                    modified = true;
                 }
             } else {
                 searchSourceBuilder.sort(sort);
-                modified = true;
             }
         }

@@ -259,7 +248,6 @@ public class RestSearchAction extends BaseRestHandler {
         String sStats = request.param("stats");
         if (sStats != null) {
             searchSourceBuilder.stats(Arrays.asList(Strings.splitStringByCommaToArray(sStats)));
-            modified = true;
         }

         String suggestField = request.param("suggest_field");
@@ -271,8 +259,6 @@ public class RestSearchAction extends BaseRestHandler {
                             termSuggestion(suggestField).field(suggestField)
                                     .text(suggestText).size(suggestSize)
                                     .suggestMode(SuggestMode.resolve(suggestMode))));
-            modified = true;
         }
-        return modified;
     }
 }
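The key behavioral change here is that parseSearchRequest and parseSearchSource now parse on top of an existing request: a value is only overwritten when the corresponding URL parameter is actually present, so callers can pre-populate defaults and the "modified" bookkeeping becomes unnecessary. A miniature of that merge rule, with hypothetical types, follows.

import java.util.Map;

final class ParseOnTopSketch {
    static final class Source {
        Integer from;
        Integer size;
    }

    // Mirrors the -1 convention above: an absent parameter leaves the existing value alone.
    static void parseOnTop(Source source, Map<String, String> params) {
        int from = Integer.parseInt(params.getOrDefault("from", "-1"));
        if (from != -1) {
            source.from = from;
        }
        int size = Integer.parseInt(params.getOrDefault("size", "-1"));
        if (size != -1) {
            source.size = size;
        }
    }

    public static void main(String[] args) {
        Source source = new Source();
        source.size = 50; // pre-populated default, e.g. by a caller supplying its own body
        parseOnTop(source, Map.of("from", "10"));
        System.out.println(source.from + " " + source.size); // 10 50: size was preserved
    }
}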
@@ -114,14 +114,14 @@ public class RestActions {
         return queryBuilder;
     }

-    public static SearchSourceBuilder getRestSearchSource(BytesReference sourceBytes, IndicesQueriesRegistry queryRegistry,
+    public static void parseRestSearchSource(SearchSourceBuilder source, BytesReference sourceBytes, IndicesQueriesRegistry queryRegistry,
             ParseFieldMatcher parseFieldMatcher, AggregatorParsers aggParsers)
             throws IOException {
         XContentParser parser = XContentFactory.xContent(sourceBytes).createParser(sourceBytes);
         QueryParseContext queryParseContext = new QueryParseContext(queryRegistry);
         queryParseContext.reset(parser);
         queryParseContext.parseFieldMatcher(parseFieldMatcher);
-        return SearchSourceBuilder.parseSearchSource(parser, queryParseContext, aggParsers);
+        source.parseXContent(parser, queryParseContext, aggParsers);
     }

     /**
@@ -30,7 +30,7 @@ import org.elasticsearch.rest.RestStatus;
  * A REST based action listener that assumes the response is of type {@link ToXContent} and automatically
  * builds an XContent based response (wrapping the toXContent in startObject/endObject).
  */
-public final class RestToXContentListener<Response extends ToXContent> extends RestResponseListener<Response> {
+public class RestToXContentListener<Response extends ToXContent> extends RestResponseListener<Response> {

     public RestToXContentListener(RestChannel channel) {
         super(channel);
@@ -45,6 +45,10 @@ public final class RestToXContentListener<Response extends ToXContent> extends R
         builder.startObject();
         response.toXContent(builder, channel.request());
         builder.endObject();
-        return new BytesRestResponse(RestStatus.OK, builder);
+        return new BytesRestResponse(getStatus(response), builder);
+    }
+
+    protected RestStatus getStatus(Response response) {
+        return RestStatus.OK;
     }
 }
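Dropping final and routing the status through a protected hook lets a subclass report a response-derived status; RestStatusToXContentListener, used by RestSearchAction above, is the obvious client. A sketch of the pattern with hypothetical stand-in types, not the real RestChannel/BytesRestResponse API:

// Hypothetical stand-ins; only the override pattern mirrors the real listener classes.
interface HasStatus {
    int status();
}

class ToXContentListenerSketch<R> {
    final String buildResponse(R response) {
        return getStatus(response) + " " + response; // the status now comes from a hook
    }

    protected int getStatus(R response) {
        return 200; // default OK, matching the previously hard-coded behavior
    }
}

class StatusListenerSketch<R extends HasStatus> extends ToXContentListenerSketch<R> {
    @Override
    protected int getStatus(R response) {
        return response.status(); // e.g. 201 for an index-created response
    }
}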
@@ -96,7 +96,7 @@ import org.elasticsearch.index.query.functionscore.random.RandomScoreFunctionPar
 import org.elasticsearch.index.query.functionscore.script.ScriptScoreFunctionParser;
 import org.elasticsearch.index.query.functionscore.weight.WeightBuilder;
 import org.elasticsearch.indices.query.IndicesQueriesRegistry;
-import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.search.action.SearchTransportService;
 import org.elasticsearch.search.aggregations.AggregationBinaryParseElement;
 import org.elasticsearch.search.aggregations.AggregationParseElement;
 import org.elasticsearch.search.aggregations.AggregationPhase;
@@ -452,7 +452,7 @@ public class SearchModule extends AbstractModule {
         bind(QueryPhase.class).asEagerSingleton();
         bind(SearchPhaseController.class).asEagerSingleton();
         bind(FetchPhase.class).asEagerSingleton();
-        bind(SearchServiceTransportAction.class).asEagerSingleton();
+        bind(SearchTransportService.class).asEagerSingleton();
         if (searchServiceImpl == SearchService.class) {
             bind(SearchService.class).asEagerSingleton();
         } else {
@@ -23,7 +23,6 @@ import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ActionListenerResponseHandler;
 import org.elasticsearch.action.IndicesRequest;
 import org.elasticsearch.action.OriginalIndices;
-import org.elasticsearch.action.search.ClearScrollRequest;
 import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -58,7 +57,7 @@ import java.io.IOException;
  * An encapsulation of {@link org.elasticsearch.search.SearchService} operations exposed through
  * transport.
  */
-public class SearchServiceTransportAction extends AbstractComponent {
+public class SearchTransportService extends AbstractComponent {

     public static final String FREE_CONTEXT_SCROLL_ACTION_NAME = "indices:data/read/search[free_context/scroll]";
     public static final String FREE_CONTEXT_ACTION_NAME = "indices:data/read/search[free_context]";
@@ -77,26 +76,39 @@ public class SearchServiceTransportAction extends AbstractComponent {
     private final SearchService searchService;

     @Inject
-    public SearchServiceTransportAction(Settings settings, TransportService transportService, SearchService searchService) {
+    public SearchTransportService(Settings settings, TransportService transportService, SearchService searchService) {
         super(settings);
         this.transportService = transportService;
         this.searchService = searchService;
-        transportService.registerRequestHandler(FREE_CONTEXT_SCROLL_ACTION_NAME, ScrollFreeContextRequest::new, ThreadPool.Names.SAME, new FreeContextTransportHandler<>());
-        transportService.registerRequestHandler(FREE_CONTEXT_ACTION_NAME, SearchFreeContextRequest::new, ThreadPool.Names.SAME, new FreeContextTransportHandler<SearchFreeContextRequest>());
-        transportService.registerRequestHandler(CLEAR_SCROLL_CONTEXTS_ACTION_NAME, ClearScrollContextsRequest::new, ThreadPool.Names.SAME, new ClearScrollContextsTransportHandler());
-        transportService.registerRequestHandler(DFS_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH, new SearchDfsTransportHandler());
-        transportService.registerRequestHandler(QUERY_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH, new SearchQueryTransportHandler());
-        transportService.registerRequestHandler(QUERY_ID_ACTION_NAME, QuerySearchRequest::new, ThreadPool.Names.SEARCH, new SearchQueryByIdTransportHandler());
-        transportService.registerRequestHandler(QUERY_SCROLL_ACTION_NAME, InternalScrollSearchRequest::new, ThreadPool.Names.SEARCH, new SearchQueryScrollTransportHandler());
-        transportService.registerRequestHandler(QUERY_FETCH_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH, new SearchQueryFetchTransportHandler());
-        transportService.registerRequestHandler(QUERY_QUERY_FETCH_ACTION_NAME, QuerySearchRequest::new, ThreadPool.Names.SEARCH, new SearchQueryQueryFetchTransportHandler());
-        transportService.registerRequestHandler(QUERY_FETCH_SCROLL_ACTION_NAME, InternalScrollSearchRequest::new, ThreadPool.Names.SEARCH, new SearchQueryFetchScrollTransportHandler());
-        transportService.registerRequestHandler(FETCH_ID_SCROLL_ACTION_NAME, ShardFetchRequest::new, ThreadPool.Names.SEARCH, new FetchByIdTransportHandler<>());
-        transportService.registerRequestHandler(FETCH_ID_ACTION_NAME, ShardFetchSearchRequest::new, ThreadPool.Names.SEARCH, new FetchByIdTransportHandler<ShardFetchSearchRequest>());
+        transportService.registerRequestHandler(FREE_CONTEXT_SCROLL_ACTION_NAME, ScrollFreeContextRequest::new, ThreadPool.Names.SAME,
+                new FreeContextTransportHandler<>());
+        transportService.registerRequestHandler(FREE_CONTEXT_ACTION_NAME, SearchFreeContextRequest::new, ThreadPool.Names.SAME,
+                new FreeContextTransportHandler<>());
+        transportService.registerRequestHandler(CLEAR_SCROLL_CONTEXTS_ACTION_NAME, ClearScrollContextsRequest::new, ThreadPool.Names.SAME,
+                new ClearScrollContextsTransportHandler());
+        transportService.registerRequestHandler(DFS_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH,
+                new SearchDfsTransportHandler());
+        transportService.registerRequestHandler(QUERY_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH,
+                new SearchQueryTransportHandler());
+        transportService.registerRequestHandler(QUERY_ID_ACTION_NAME, QuerySearchRequest::new, ThreadPool.Names.SEARCH,
+                new SearchQueryByIdTransportHandler());
+        transportService.registerRequestHandler(QUERY_SCROLL_ACTION_NAME, InternalScrollSearchRequest::new, ThreadPool.Names.SEARCH,
+                new SearchQueryScrollTransportHandler());
+        transportService.registerRequestHandler(QUERY_FETCH_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH,
+                new SearchQueryFetchTransportHandler());
+        transportService.registerRequestHandler(QUERY_QUERY_FETCH_ACTION_NAME, QuerySearchRequest::new, ThreadPool.Names.SEARCH,
+                new SearchQueryQueryFetchTransportHandler());
+        transportService.registerRequestHandler(QUERY_FETCH_SCROLL_ACTION_NAME, InternalScrollSearchRequest::new, ThreadPool.Names.SEARCH,
+                new SearchQueryFetchScrollTransportHandler());
+        transportService.registerRequestHandler(FETCH_ID_SCROLL_ACTION_NAME, ShardFetchRequest::new, ThreadPool.Names.SEARCH,
+                new FetchByIdTransportHandler<>());
+        transportService.registerRequestHandler(FETCH_ID_ACTION_NAME, ShardFetchSearchRequest::new, ThreadPool.Names.SEARCH,
+                new FetchByIdTransportHandler<>());
     }

     public void sendFreeContext(DiscoveryNode node, final long contextId, SearchRequest request) {
-        transportService.sendRequest(node, FREE_CONTEXT_ACTION_NAME, new SearchFreeContextRequest(request, contextId), new ActionListenerResponseHandler<SearchFreeContextResponse>(new ActionListener<SearchFreeContextResponse>() {
+        transportService.sendRequest(node, FREE_CONTEXT_ACTION_NAME, new SearchFreeContextRequest(request, contextId),
+                new ActionListenerResponseHandler<SearchFreeContextResponse>(new ActionListener<SearchFreeContextResponse>() {
             @Override
             public void onResponse(SearchFreeContextResponse response) {
                 // no need to respond if it was freed or not
@@ -114,8 +126,9 @@ public class SearchServiceTransportAction extends AbstractComponent {
         });
     }

-    public void sendFreeContext(DiscoveryNode node, long contextId, ClearScrollRequest request, final ActionListener<SearchFreeContextResponse> listener) {
-        transportService.sendRequest(node, FREE_CONTEXT_SCROLL_ACTION_NAME, new ScrollFreeContextRequest(request, contextId), new ActionListenerResponseHandler<SearchFreeContextResponse>(listener) {
+    public void sendFreeContext(DiscoveryNode node, long contextId, final ActionListener<SearchFreeContextResponse> listener) {
+        transportService.sendRequest(node, FREE_CONTEXT_SCROLL_ACTION_NAME, new ScrollFreeContextRequest(contextId),
+                new ActionListenerResponseHandler<SearchFreeContextResponse>(listener) {
             @Override
             public SearchFreeContextResponse newInstance() {
                 return new SearchFreeContextResponse();
@@ -123,8 +136,9 @@ public class SearchServiceTransportAction extends AbstractComponent {
         });
     }

-    public void sendClearAllScrollContexts(DiscoveryNode node, ClearScrollRequest request, final ActionListener<TransportResponse> listener) {
-        transportService.sendRequest(node, CLEAR_SCROLL_CONTEXTS_ACTION_NAME, new ClearScrollContextsRequest(), new ActionListenerResponseHandler<TransportResponse>(listener) {
+    public void sendClearAllScrollContexts(DiscoveryNode node, final ActionListener<TransportResponse> listener) {
+        transportService.sendRequest(node, CLEAR_SCROLL_CONTEXTS_ACTION_NAME, new ClearScrollContextsRequest(),
+                new ActionListenerResponseHandler<TransportResponse>(listener) {
             @Override
             public TransportResponse newInstance() {
                 return TransportResponse.Empty.INSTANCE;
@@ -132,7 +146,8 @@ public class SearchServiceTransportAction extends AbstractComponent {
         });
     }

-    public void sendExecuteDfs(DiscoveryNode node, final ShardSearchTransportRequest request, final ActionListener<DfsSearchResult> listener) {
+    public void sendExecuteDfs(DiscoveryNode node, final ShardSearchTransportRequest request,
+            final ActionListener<DfsSearchResult> listener) {
         transportService.sendRequest(node, DFS_ACTION_NAME, request, new ActionListenerResponseHandler<DfsSearchResult>(listener) {
             @Override
             public DfsSearchResult newInstance() {
@@ -141,8 +156,10 @@ public class SearchServiceTransportAction extends AbstractComponent {
         });
     }

-    public void sendExecuteQuery(DiscoveryNode node, final ShardSearchTransportRequest request, final ActionListener<QuerySearchResultProvider> listener) {
-        transportService.sendRequest(node, QUERY_ACTION_NAME, request, new ActionListenerResponseHandler<QuerySearchResultProvider>(listener) {
+    public void sendExecuteQuery(DiscoveryNode node, final ShardSearchTransportRequest request,
+            final ActionListener<QuerySearchResultProvider> listener) {
+        transportService.sendRequest(node, QUERY_ACTION_NAME, request,
+                new ActionListenerResponseHandler<QuerySearchResultProvider>(listener) {
             @Override
             public QuerySearchResult newInstance() {
                 return new QuerySearchResult();
@@ -159,8 +176,10 @@ public class SearchServiceTransportAction extends AbstractComponent {
         });
     }

-    public void sendExecuteQuery(DiscoveryNode node, final InternalScrollSearchRequest request, final ActionListener<ScrollQuerySearchResult> listener) {
-        transportService.sendRequest(node, QUERY_SCROLL_ACTION_NAME, request, new ActionListenerResponseHandler<ScrollQuerySearchResult>(listener) {
+    public void sendExecuteQuery(DiscoveryNode node, final InternalScrollSearchRequest request,
+            final ActionListener<ScrollQuerySearchResult> listener) {
+        transportService.sendRequest(node, QUERY_SCROLL_ACTION_NAME, request,
+                new ActionListenerResponseHandler<ScrollQuerySearchResult>(listener) {
             @Override
             public ScrollQuerySearchResult newInstance() {
                 return new ScrollQuerySearchResult();
@@ -168,8 +187,10 @@ public class SearchServiceTransportAction extends AbstractComponent {
         });
     }

-    public void sendExecuteFetch(DiscoveryNode node, final ShardSearchTransportRequest request, final ActionListener<QueryFetchSearchResult> listener) {
-        transportService.sendRequest(node, QUERY_FETCH_ACTION_NAME, request, new ActionListenerResponseHandler<QueryFetchSearchResult>(listener) {
+    public void sendExecuteFetch(DiscoveryNode node, final ShardSearchTransportRequest request,
+            final ActionListener<QueryFetchSearchResult> listener) {
+        transportService.sendRequest(node, QUERY_FETCH_ACTION_NAME, request,
+                new ActionListenerResponseHandler<QueryFetchSearchResult>(listener) {
             @Override
             public QueryFetchSearchResult newInstance() {
                 return new QueryFetchSearchResult();
@@ -177,8 +198,10 @@ public class SearchServiceTransportAction extends AbstractComponent {
         });
     }

-    public void sendExecuteFetch(DiscoveryNode node, final QuerySearchRequest request, final ActionListener<QueryFetchSearchResult> listener) {
-        transportService.sendRequest(node, QUERY_QUERY_FETCH_ACTION_NAME, request, new ActionListenerResponseHandler<QueryFetchSearchResult>(listener) {
+    public void sendExecuteFetch(DiscoveryNode node, final QuerySearchRequest request,
+            final ActionListener<QueryFetchSearchResult> listener) {
+        transportService.sendRequest(node, QUERY_QUERY_FETCH_ACTION_NAME, request,
+                new ActionListenerResponseHandler<QueryFetchSearchResult>(listener) {
             @Override
             public QueryFetchSearchResult newInstance() {
                 return new QueryFetchSearchResult();
@@ -186,8 +209,10 @@ public class SearchServiceTransportAction extends AbstractComponent {
         });
     }

-    public void sendExecuteFetch(DiscoveryNode node, final InternalScrollSearchRequest request, final ActionListener<ScrollQueryFetchSearchResult> listener) {
-        transportService.sendRequest(node, QUERY_FETCH_SCROLL_ACTION_NAME, request, new ActionListenerResponseHandler<ScrollQueryFetchSearchResult>(listener) {
+    public void sendExecuteFetch(DiscoveryNode node, final InternalScrollSearchRequest request,
+            final ActionListener<ScrollQueryFetchSearchResult> listener) {
+        transportService.sendRequest(node, QUERY_FETCH_SCROLL_ACTION_NAME, request,
+                new ActionListenerResponseHandler<ScrollQueryFetchSearchResult>(listener) {
             @Override
             public ScrollQueryFetchSearchResult newInstance() {
                 return new ScrollQueryFetchSearchResult();
@@ -195,15 +220,18 @@ public class SearchServiceTransportAction extends AbstractComponent {
         });
     }

-    public void sendExecuteFetch(DiscoveryNode node, final ShardFetchSearchRequest request, final ActionListener<FetchSearchResult> listener) {
+    public void sendExecuteFetch(DiscoveryNode node, final ShardFetchSearchRequest request,
+            final ActionListener<FetchSearchResult> listener) {
         sendExecuteFetch(node, FETCH_ID_ACTION_NAME, request, listener);
     }

-    public void sendExecuteFetchScroll(DiscoveryNode node, final ShardFetchRequest request, final ActionListener<FetchSearchResult> listener) {
+    public void sendExecuteFetchScroll(DiscoveryNode node, final ShardFetchRequest request,
+            final ActionListener<FetchSearchResult> listener) {
         sendExecuteFetch(node, FETCH_ID_SCROLL_ACTION_NAME, request, listener);
     }

-    private void sendExecuteFetch(DiscoveryNode node, String action, final ShardFetchRequest request, final ActionListener<FetchSearchResult> listener) {
+    private void sendExecuteFetch(DiscoveryNode node, String action, final ShardFetchRequest request,
+            final ActionListener<FetchSearchResult> listener) {
         transportService.sendRequest(node, action, request, new ActionListenerResponseHandler<FetchSearchResult>(listener) {
             @Override
             public FetchSearchResult newInstance() {
@@ -212,17 +240,13 @@ public class SearchServiceTransportAction extends AbstractComponent {
         });
     }

-    public static class ScrollFreeContextRequest extends TransportRequest {
+    static class ScrollFreeContextRequest extends TransportRequest {
         private long id;

-        public ScrollFreeContextRequest() {
+        ScrollFreeContextRequest() {
         }

-        ScrollFreeContextRequest(ClearScrollRequest request, long id) {
-            this(id);
-        }
-
-        private ScrollFreeContextRequest(long id) {
+        ScrollFreeContextRequest(long id) {
             this.id = id;
         }

@@ -243,7 +267,7 @@ public class SearchServiceTransportAction extends AbstractComponent {
         }
     }

-    public static class SearchFreeContextRequest extends ScrollFreeContextRequest implements IndicesRequest {
+    static class SearchFreeContextRequest extends ScrollFreeContextRequest implements IndicesRequest {
         private OriginalIndices originalIndices;

         public SearchFreeContextRequest() {
@@ -311,7 +335,8 @@ public class SearchServiceTransportAction extends AbstractComponent {
         }
     }

-    class FreeContextTransportHandler<FreeContextRequest extends ScrollFreeContextRequest> implements TransportRequestHandler<FreeContextRequest> {
+    class FreeContextTransportHandler<FreeContextRequest extends ScrollFreeContextRequest>
+            implements TransportRequestHandler<FreeContextRequest> {
         @Override
         public void messageReceived(FreeContextRequest request, TransportChannel channel) throws Exception {
             boolean freed = searchService.freeContext(request.id());
@@ -319,7 +344,7 @@ public class SearchServiceTransportAction extends AbstractComponent {
         }
     }

-    public static class ClearScrollContextsRequest extends TransportRequest {
+    static class ClearScrollContextsRequest extends TransportRequest {
     }

     class ClearScrollContextsTransportHandler implements TransportRequestHandler<ClearScrollContextsRequest> {
@@ -393,5 +418,4 @@ public class SearchServiceTransportAction extends AbstractComponent {
             channel.sendResponse(result);
         }
     }
-
 }
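Beyond the rename, every send* method above follows one shape: fire a transport request at a node and adapt the eventual wire response into onResponse/onFailure callbacks. A minimal sketch of that adaptation, using a CompletableFuture as a stand-in for the transport layer (hypothetical types throughout):

import java.util.concurrent.CompletableFuture;

final class ListenerAdapterSketch {
    interface Listener<R> {
        void onResponse(R response);
        void onFailure(Throwable e);
    }

    static <R> void sendRequest(CompletableFuture<R> wire, Listener<R> listener) {
        wire.whenComplete((response, error) -> {
            if (error != null) {
                listener.onFailure(error); // transport or remote execution failure
            } else {
                listener.onResponse(response); // deserialized result, e.g. a fetch result
            }
        });
    }

    public static void main(String[] args) {
        sendRequest(CompletableFuture.completedFuture("fetch-result"), new Listener<String>() {
            @Override
            public void onResponse(String response) {
                System.out.println("got " + response);
            }

            @Override
            public void onFailure(Throwable e) {
                e.printStackTrace();
            }
        });
    }
}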
@@ -734,9 +734,22 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
         return ext;
     }
+    /**
+     * Create a new SearchSourceBuilder with attributes set by an xContent.
+     */
     public SearchSourceBuilder fromXContent(XContentParser parser, QueryParseContext context, AggregatorParsers aggParsers)
             throws IOException {
         SearchSourceBuilder builder = new SearchSourceBuilder();
+        builder.parseXContent(parser, context, aggParsers);
+        return builder;
+    }
+    /**
+     * Parse some xContent into this SearchSourceBuilder, overwriting any values specified in the xContent. Use this if you need to set up
+     * different defaults than a regular SearchSourceBuilder would have and use
+     * {@link #fromXContent(XContentParser, QueryParseContext, AggregatorParsers)} if you have normal defaults.
+     */
+    public void parseXContent(XContentParser parser, QueryParseContext context, AggregatorParsers aggParsers) throws IOException {
         XContentParser.Token token = parser.currentToken();
         String currentFieldName = null;
         if (token != XContentParser.Token.START_OBJECT && (token = parser.nextToken()) != XContentParser.Token.START_OBJECT) {
@@ -748,44 +761,42 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
                 currentFieldName = parser.currentName();
             } else if (token.isValue()) {
                 if (context.parseFieldMatcher().match(currentFieldName, FROM_FIELD)) {
-                    builder.from = parser.intValue();
+                    from = parser.intValue();
                 } else if (context.parseFieldMatcher().match(currentFieldName, SIZE_FIELD)) {
-                    builder.size = parser.intValue();
+                    size = parser.intValue();
                 } else if (context.parseFieldMatcher().match(currentFieldName, TIMEOUT_FIELD)) {
-                    builder.timeoutInMillis = parser.longValue();
+                    timeoutInMillis = parser.longValue();
                 } else if (context.parseFieldMatcher().match(currentFieldName, TERMINATE_AFTER_FIELD)) {
-                    builder.terminateAfter = parser.intValue();
+                    terminateAfter = parser.intValue();
                 } else if (context.parseFieldMatcher().match(currentFieldName, MIN_SCORE_FIELD)) {
-                    builder.minScore = parser.floatValue();
+                    minScore = parser.floatValue();
                 } else if (context.parseFieldMatcher().match(currentFieldName, VERSION_FIELD)) {
-                    builder.version = parser.booleanValue();
+                    version = parser.booleanValue();
                 } else if (context.parseFieldMatcher().match(currentFieldName, EXPLAIN_FIELD)) {
-                    builder.explain = parser.booleanValue();
+                    explain = parser.booleanValue();
                 } else if (context.parseFieldMatcher().match(currentFieldName, TRACK_SCORES_FIELD)) {
-                    builder.trackScores = parser.booleanValue();
+                    trackScores = parser.booleanValue();
                 } else if (context.parseFieldMatcher().match(currentFieldName, _SOURCE_FIELD)) {
-                    builder.fetchSourceContext = FetchSourceContext.parse(parser, context);
+                    fetchSourceContext = FetchSourceContext.parse(parser, context);
                 } else if (context.parseFieldMatcher().match(currentFieldName, FIELDS_FIELD)) {
-                    List<String> fieldNames = new ArrayList<>();
                     fieldNames.add(parser.text());
-                    builder.fieldNames = fieldNames;
                 } else if (context.parseFieldMatcher().match(currentFieldName, SORT_FIELD)) {
-                    builder.sort(parser.text());
+                    sort(parser.text());
                 } else if (context.parseFieldMatcher().match(currentFieldName, PROFILE_FIELD)) {
-                    builder.profile = parser.booleanValue();
+                    profile = parser.booleanValue();
                 } else {
                     throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].",
                             parser.getTokenLocation());
                 }
             } else if (token == XContentParser.Token.START_OBJECT) {
                 if (context.parseFieldMatcher().match(currentFieldName, QUERY_FIELD)) {
-                    builder.queryBuilder = context.parseInnerQueryBuilder();
+                    queryBuilder = context.parseInnerQueryBuilder();
                 } else if (context.parseFieldMatcher().match(currentFieldName, POST_FILTER_FIELD)) {
-                    builder.postQueryBuilder = context.parseInnerQueryBuilder();
+                    postQueryBuilder = context.parseInnerQueryBuilder();
                 } else if (context.parseFieldMatcher().match(currentFieldName, _SOURCE_FIELD)) {
-                    builder.fetchSourceContext = FetchSourceContext.parse(parser, context);
+                    fetchSourceContext = FetchSourceContext.parse(parser, context);
                 } else if (context.parseFieldMatcher().match(currentFieldName, SCRIPT_FIELDS_FIELD)) {
-                    List<ScriptField> scriptFields = new ArrayList<>();
+                    scriptFields = new ArrayList<>();
                     while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                         String scriptFieldName = parser.currentName();
                         token = parser.nextToken();
@@ -822,9 +833,8 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
                                     + currentFieldName + "] but found [" + token + "]", parser.getTokenLocation());
                         }
                     }
-                    builder.scriptFields = scriptFields;
                 } else if (context.parseFieldMatcher().match(currentFieldName, INDICES_BOOST_FIELD)) {
-                    ObjectFloatHashMap<String> indexBoost = new ObjectFloatHashMap<String>();
+                    indexBoost = new ObjectFloatHashMap<String>();
                     while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                         if (token == XContentParser.Token.FIELD_NAME) {
                             currentFieldName = parser.currentName();
@@ -835,25 +845,23 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
                             parser.getTokenLocation());
                         }
                     }
-                    builder.indexBoost = indexBoost;
                 } else if (context.parseFieldMatcher().match(currentFieldName, AGGREGATIONS_FIELD)) {
-                    builder.aggregations = aggParsers.parseAggregators(parser, context);
+                    aggregations = aggParsers.parseAggregators(parser, context);
                 } else if (context.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_FIELD)) {
-                    builder.highlightBuilder = HighlightBuilder.PROTOTYPE.fromXContent(context);
+                    highlightBuilder = HighlightBuilder.PROTOTYPE.fromXContent(context);
                 } else if (context.parseFieldMatcher().match(currentFieldName, INNER_HITS_FIELD)) {
                     XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
-                    builder.innerHitsBuilder = xContentBuilder.bytes();
+                    innerHitsBuilder = xContentBuilder.bytes();
                 } else if (context.parseFieldMatcher().match(currentFieldName, SUGGEST_FIELD)) {
                     XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
-                    builder.suggestBuilder = xContentBuilder.bytes();
+                    suggestBuilder = xContentBuilder.bytes();
                 } else if (context.parseFieldMatcher().match(currentFieldName, SORT_FIELD)) {
-                    List<BytesReference> sorts = new ArrayList<>();
+                    sorts = new ArrayList<>();
                     XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
                     sorts.add(xContentBuilder.bytes());
-                    builder.sorts = sorts;
                 } else if (context.parseFieldMatcher().match(currentFieldName, EXT_FIELD)) {
                     XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
-                    builder.ext = xContentBuilder.bytes();
+                    ext = xContentBuilder.bytes();
                 } else {
                     throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].",
                             parser.getTokenLocation());
@@ -861,7 +869,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
             } else if (token == XContentParser.Token.START_ARRAY) {
                 if (context.parseFieldMatcher().match(currentFieldName, FIELDS_FIELD)) {
-                    List<String> fieldNames = new ArrayList<>();
+                    fieldNames = new ArrayList<>();
                     while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                         if (token == XContentParser.Token.VALUE_STRING) {
                             fieldNames.add(parser.text());
@@ -870,9 +878,8 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
                                     + currentFieldName + "] but found [" + token + "]", parser.getTokenLocation());
                         }
                     }
-                    builder.fieldNames = fieldNames;
                 } else if (context.parseFieldMatcher().match(currentFieldName, FIELDDATA_FIELDS_FIELD)) {
-                    List<String> fieldDataFields = new ArrayList<>();
+                    fieldDataFields = new ArrayList<>();
                     while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                         if (token == XContentParser.Token.VALUE_STRING) {
                             fieldDataFields.add(parser.text());
@@ -881,22 +888,19 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
                                     + currentFieldName + "] but found [" + token + "]", parser.getTokenLocation());
                         }
                     }
-                    builder.fieldDataFields = fieldDataFields;
                 } else if (context.parseFieldMatcher().match(currentFieldName, SORT_FIELD)) {
-                    List<BytesReference> sorts = new ArrayList<>();
+                    sorts = new ArrayList<>();
                     while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                         XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
                         sorts.add(xContentBuilder.bytes());
                     }
-                    builder.sorts = sorts;
                 } else if (context.parseFieldMatcher().match(currentFieldName, RESCORE_FIELD)) {
-                    List<RescoreBuilder<?>> rescoreBuilders = new ArrayList<>();
+                    rescoreBuilders = new ArrayList<>();
                     while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                         rescoreBuilders.add(RescoreBuilder.parseFromXContent(context));
                     }
-                    builder.rescoreBuilders = rescoreBuilders;
                 } else if (context.parseFieldMatcher().match(currentFieldName, STATS_FIELD)) {
-                    List<String> stats = new ArrayList<>();
+                    stats = new ArrayList<>();
                     while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                         if (token == XContentParser.Token.VALUE_STRING) {
                             stats.add(parser.text());
@@ -905,11 +909,10 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
                                     + currentFieldName + "] but found [" + token + "]", parser.getTokenLocation());
                         }
                     }
-                    builder.stats = stats;
                 } else if (context.parseFieldMatcher().match(currentFieldName, _SOURCE_FIELD)) {
-                    builder.fetchSourceContext = FetchSourceContext.parse(parser, context);
+                    fetchSourceContext = FetchSourceContext.parse(parser, context);
                 } else if (context.parseFieldMatcher().match(currentFieldName, SEARCH_AFTER)) {
-                    builder.searchAfterBuilder = SearchAfterBuilder.PROTOTYPE.fromXContent(parser, context.parseFieldMatcher());
+                    searchAfterBuilder = SearchAfterBuilder.PROTOTYPE.fromXContent(parser, context.parseFieldMatcher());
                 } else {
                     throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].",
                             parser.getTokenLocation());
@@ -919,7 +922,6 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
                     parser.getTokenLocation());
             }
         }
-        return builder;
     }
     @Override
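A minimal sketch of how the two entry points divide the work, assuming an already-positioned XContentParser plus initialized QueryParseContext and AggregatorParsers (the size default below is purely illustrative):

    // Stock defaults: let fromXContent create and fill a fresh builder.
    SearchSourceBuilder parsed = new SearchSourceBuilder().fromXContent(parser, context, aggParsers);

    // Custom defaults: pre-configure a builder, then overlay the request body;
    // any value present in the xContent overwrites the pre-set default.
    SearchSourceBuilder custom = new SearchSourceBuilder();
    custom.size(50); // hypothetical default, replaced if the body sets "size"
    custom.parseXContent(parser, context, aggParsers);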

View File

@@ -19,6 +19,8 @@
 package org.elasticsearch.tasks;
 
+import org.elasticsearch.common.Nullable;
+
 import java.util.concurrent.atomic.AtomicReference;
 
 /**
@@ -56,4 +58,11 @@ public class CancellableTask extends Task {
         return reason.get() != null;
     }
 
+    /**
+     * The reason the task was cancelled or null if it hasn't been cancelled.
+     */
+    @Nullable
+    public String getReasonCancelled() {
+        return reason.get();
+    }
 }
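A hedged sketch of a cooperative consumer of the new accessor, assuming the method whose body appears above is named isCancelled(); doChunkOfWork is a made-up placeholder:

    void runUntilCancelled(CancellableTask task) {
        while (task.isCancelled() == false) {
            doChunkOfWork(); // hypothetical unit of work
        }
        // non-null exactly when the task has been cancelled
        String reason = task.getReasonCancelled();
    }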

View File

@@ -0,0 +1,54 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.tasks;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
/**
* A TaskListener that just logs the response at the info level. Used when we
* need a listener but aren't returning the result to the user.
*/
public final class LoggingTaskListener<Response> implements TaskListener<Response> {
private final static ESLogger logger = Loggers.getLogger(LoggingTaskListener.class);
/**
* Get the instance of LoggingTaskListener cast appropriately.
*/
@SuppressWarnings("unchecked") // Safe because we only toString the response
public static <Response> TaskListener<Response> instance() {
return (TaskListener<Response>) INSTANCE;
}
private static final LoggingTaskListener<Object> INSTANCE = new LoggingTaskListener<Object>();
private LoggingTaskListener() {
}
@Override
public void onResponse(Task task, Response response) {
logger.info("{} finished with response {}", task.getId(), response);
}
@Override
public void onFailure(Task task, Throwable e) {
logger.warn("{} failed with exception", e, task.getId());
}
}
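Intended usage is as a drop-in wherever a TaskListener is required but the result is discarded; the action and its execute signature in this sketch are hypothetical:

    // fire-and-forget: the outcome is only logged, never returned to a caller
    someAction.execute(task, request, LoggingTaskListener.instance());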

View File

@@ -0,0 +1,49 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.tasks;
/**
* Listener for Task success or failure.
*/
public interface TaskListener<Response> {
/**
* Handle task response. This response may constitute a failure or a success
* but it is up to the listener to make that decision.
*
* @param task
* the task being executed. May be null if the action doesn't
* create a task
* @param response
* the response from the action that executed the task
*/
void onResponse(Task task, Response response);
/**
* A failure caused by an exception at some phase of the task.
*
* @param task
* the task being executed. May be null if the action doesn't
* create a task
* @param e
* the failure
*/
void onFailure(Task task, Throwable e);
}
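To illustrate the contract, a minimal hedged implementation that bridges a task result into a future (FutureTaskListener is invented here; PlainActionFuture is the existing support helper):

    class FutureTaskListener<Response> implements TaskListener<Response> {
        private final PlainActionFuture<Response> future = PlainActionFuture.newFuture();

        @Override
        public void onResponse(Task task, Response response) {
            future.onResponse(response); // task may be null, per the javadoc above
        }

        @Override
        public void onFailure(Task task, Throwable e) {
            future.onFailure(e);
        }
    }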

View File

@@ -35,4 +35,8 @@ public class BindTransportException extends TransportException {
     public BindTransportException(String message, Throwable cause) {
         super(message, cause);
     }
+
+    public BindTransportException(String message) {
+        super(message);
+    }
 }

View File

@@ -19,6 +19,8 @@
 package org.elasticsearch.transport.netty;
 
+import com.carrotsearch.hppc.IntHashSet;
+import com.carrotsearch.hppc.IntSet;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -535,8 +537,16 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
             throw new BindTransportException("Failed to resolve publish address", e);
         }
+        final int publishPort = resolvePublishPort(name, settings, profileSettings, boundAddresses, publishInetAddress);
+        final TransportAddress publishAddress = new InetSocketTransportAddress(new InetSocketAddress(publishInetAddress, publishPort));
+        return new BoundTransportAddress(transportBoundAddresses, publishAddress);
+    }
+
+    // package private for tests
+    static int resolvePublishPort(String profileName, Settings settings, Settings profileSettings, List<InetSocketAddress> boundAddresses,
+                                  InetAddress publishInetAddress) {
         int publishPort;
-        if (TransportSettings.DEFAULT_PROFILE.equals(name)) {
+        if (TransportSettings.DEFAULT_PROFILE.equals(profileName)) {
             publishPort = TransportSettings.PUBLISH_PORT.get(settings);
         } else {
             publishPort = profileSettings.getAsInt("publish_port", -1);
@@ -553,17 +563,25 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
             }
         }
 
-        // if port still not matches, just take port of first bound address
+        // if no matching boundAddress found, check if there is a unique port for all bound addresses
         if (publishPort < 0) {
-            // TODO: In case of DEFAULT_PROFILE we should probably fail here, as publish address does not match any bound address
-            // In case of a custom profile, we might use the publish address of the default profile
-            publishPort = boundAddresses.get(0).getPort();
-            logger.warn("Publish port not found by matching publish address [{}] to bound addresses [{}], "
-                    + "falling back to port [{}] of first bound address", publishInetAddress, boundAddresses, publishPort);
+            final IntSet ports = new IntHashSet();
+            for (InetSocketAddress boundAddress : boundAddresses) {
+                ports.add(boundAddress.getPort());
+            }
+            if (ports.size() == 1) {
+                publishPort = ports.iterator().next().value;
+            }
         }
 
-        final TransportAddress publishAddress = new InetSocketTransportAddress(new InetSocketAddress(publishInetAddress, publishPort));
-        return new BoundTransportAddress(transportBoundAddresses, publishAddress);
+        if (publishPort < 0) {
+            String profileExplanation = TransportSettings.DEFAULT_PROFILE.equals(profileName) ? "" : " for profile " + profileName;
+            throw new BindTransportException("Failed to auto-resolve publish port" + profileExplanation + ", multiple bound addresses " +
+                    boundAddresses + " with distinct ports and none of them matched the publish address (" + publishInetAddress + "). " +
+                    "Please specify a unique port by setting " + TransportSettings.PORT.getKey() + " or " +
+                    TransportSettings.PUBLISH_PORT.getKey());
+        }
+        return publishPort;
     }
 
     private void createServerBootstrap(String name, Settings settings) {
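Extracting resolvePublishPort into a static method makes the resolution order easy to exercise: an explicit publish_port wins, then the port of a bound address matching the publish address, then a port shared by every bound address, and otherwise the new BindTransportException. A hedged sketch of the shared-port case (addresses invented, call is package-local):

    List<InetSocketAddress> bound = Arrays.asList(
            new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 9300),
            new InetSocketAddress(InetAddress.getByName("127.0.0.2"), 9300));
    // nothing configured and 127.0.0.3 matches no bound address, but both
    // bound addresses share port 9300, so 9300 is chosen
    int publishPort = resolvePublishPort(TransportSettings.DEFAULT_PROFILE,
            Settings.EMPTY, Settings.EMPTY, bound, InetAddress.getByName("127.0.0.3"));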

View File

@@ -55,5 +55,5 @@ OPTIONS
 -v,--verbose     Verbose output
 -h,--help        Shows this message
 -b,--batch       Enable batch mode explicitly, automatic confirmation of security permissions

View File

@@ -22,8 +22,8 @@ package org.elasticsearch.action.admin.indices.create;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
-import org.elasticsearch.rest.NoOpClient;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.client.NoOpClient;
 import org.junit.After;
 import org.junit.Before;

View File

@@ -0,0 +1,36 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.bulk;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.test.ESTestCase;
import static org.apache.lucene.util.TestUtil.randomSimpleString;
public class BulkShardRequestTests extends ESTestCase {
public void testToString() {
String index = randomSimpleString(getRandom(), 10);
int count = between(1, 100);
BulkShardRequest r = new BulkShardRequest(null, new ShardId(index, "ignored", 0), false, new BulkItemRequest[count]);
assertEquals("BulkShardRequest to [" + index + "] containing [" + count + "] requests", r.toString());
r = new BulkShardRequest(null, new ShardId(index, "ignored", 0), true, new BulkItemRequest[count]);
assertEquals("BulkShardRequest to [" + index + "] containing [" + count + "] requests and a refresh", r.toString());
}
}

View File

@@ -25,8 +25,8 @@ import org.elasticsearch.action.support.PlainActionFuture;
 import org.elasticsearch.action.update.UpdateRequest;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
-import org.elasticsearch.rest.NoOpClient;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.client.NoOpClient;
 import org.junit.After;
 import org.junit.Before;

View File

@@ -23,8 +23,8 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentType;
-import org.elasticsearch.rest.NoOpClient;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.client.NoOpClient;
 import org.junit.After;
 import org.junit.Before;

View File

@@ -0,0 +1,375 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.DummyTransportAddress;
import org.elasticsearch.test.ESTestCase;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import static org.hamcrest.Matchers.equalTo;
/**
* Tests for the {@link ClusterChangedEvent} class.
*/
public class ClusterChangedEventTests extends ESTestCase {
private static final ClusterName TEST_CLUSTER_NAME = new ClusterName("test");
private static final int INDICES_CHANGE_NUM_TESTS = 5;
private static final String NODE_ID_PREFIX = "node_";
private static final String INITIAL_CLUSTER_ID = Strings.randomBase64UUID();
// the initial indices which every cluster state test starts out with
private static final List<String> initialIndices = Arrays.asList("idx1", "idx2", "idx3");
// index settings
private static final Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
/**
* Test basic properties of the ClusterChangedEvent class:
* (1) make sure there are no null values for any of its properties
* (2) make sure you can't create a ClusterChangedEvent with any null values
*/
public void testBasicProperties() {
ClusterState newState = createSimpleClusterState();
ClusterState previousState = createSimpleClusterState();
ClusterChangedEvent event = new ClusterChangedEvent("_na_", newState, previousState);
assertThat(event.source(), equalTo("_na_"));
assertThat(event.state(), equalTo(newState));
assertThat(event.previousState(), equalTo(previousState));
assertNotNull("nodesDelta should not be null", event.nodesDelta());
// should not be able to create a ClusterChangedEvent with null values for any of the constructor args
try {
event = new ClusterChangedEvent(null, newState, previousState);
fail("should not have created a ClusterChangedEvent from a null source: " + event.source());
} catch (NullPointerException e) {
}
try {
event = new ClusterChangedEvent("_na_", null, previousState);
fail("should not have created a ClusterChangedEvent from a null state: " + event.state());
} catch (NullPointerException e) {
}
try {
event = new ClusterChangedEvent("_na_", newState, null);
fail("should not have created a ClusterChangedEvent from a null previousState: " + event.previousState());
} catch (NullPointerException e) {
}
}
/**
* Test whether the ClusterChangedEvent returns the correct value for whether the local node is master,
* based on what was set on the cluster state.
*/
public void testLocalNodeIsMaster() {
final int numNodesInCluster = 3;
ClusterState previousState = createSimpleClusterState();
ClusterState newState = createState(numNodesInCluster, true, initialIndices);
ClusterChangedEvent event = new ClusterChangedEvent("_na_", newState, previousState);
assertTrue("local node should be master", event.localNodeMaster());
newState = createState(numNodesInCluster, false, initialIndices);
event = new ClusterChangedEvent("_na_", newState, previousState);
assertFalse("local node should not be master", event.localNodeMaster());
}
/**
* Test that the indices created and indices deleted lists between two cluster states
* are correct when there is no change in the cluster UUID. Also tests metadata equality
* between cluster states.
*/
public void testMetaDataChangesOnNoMasterChange() {
metaDataChangesCheck(false);
}
/**
* Test that the indices created and indices deleted lists between two cluster states
* are correct when there is a change in the cluster UUID. Also tests metadata equality
* between cluster states.
*/
public void testMetaDataChangesOnNewClusterUUID() {
metaDataChangesCheck(true);
}
/**
* Test the index metadata change check.
*/
public void testIndexMetaDataChange() {
final int numNodesInCluster = 3;
final ClusterState originalState = createState(numNodesInCluster, randomBoolean(), initialIndices);
final ClusterState newState = originalState; // doesn't matter for this test, just need a non-null value
final ClusterChangedEvent event = new ClusterChangedEvent("_na_", originalState, newState);
// test when it's not the same IndexMetaData
final String indexId = initialIndices.get(0);
final IndexMetaData originalIndexMeta = originalState.metaData().index(indexId);
// make sure the metadata is actually on the cluster state
assertNotNull("IndexMetaData for " + indexId + " should exist on the cluster state", originalIndexMeta);
IndexMetaData newIndexMeta = createIndexMetadata(indexId, originalIndexMeta.getVersion() + 1);
assertTrue("IndexMetaData with different version numbers must be considered changed", event.indexMetaDataChanged(newIndexMeta));
// test when it doesn't exist
newIndexMeta = createIndexMetadata("doesntexist");
assertTrue("IndexMetaData that didn't previously exist should be considered changed", event.indexMetaDataChanged(newIndexMeta));
// test when it's the same IndexMetaData
assertFalse("IndexMetaData should be the same", event.indexMetaDataChanged(originalIndexMeta));
}
/**
* Test nodes added/removed/changed checks.
*/
public void testNodesAddedAndRemovedAndChanged() {
final int numNodesInCluster = 4;
final ClusterState originalState = createState(numNodesInCluster, randomBoolean(), initialIndices);
// test when nodes have not been added or removed between cluster states
ClusterState newState = createState(numNodesInCluster, randomBoolean(), initialIndices);
ClusterChangedEvent event = new ClusterChangedEvent("_na_", newState, originalState);
assertFalse("Nodes should not have been added between cluster states", event.nodesAdded());
assertFalse("Nodes should not have been removed between cluster states", event.nodesRemoved());
assertFalse("Nodes should not have been changed between cluster states", event.nodesChanged());
// test when nodes have been removed between cluster states
newState = createState(numNodesInCluster - 1, randomBoolean(), initialIndices);
event = new ClusterChangedEvent("_na_", newState, originalState);
assertTrue("Nodes should have been removed between cluster states", event.nodesRemoved());
assertFalse("Nodes should not have been added between cluster states", event.nodesAdded());
assertTrue("Nodes should have been changed between cluster states", event.nodesChanged());
// test when nodes have been added between cluster states
newState = createState(numNodesInCluster + 1, randomBoolean(), initialIndices);
event = new ClusterChangedEvent("_na_", newState, originalState);
assertFalse("Nodes should not have been removed between cluster states", event.nodesRemoved());
assertTrue("Nodes should have been added between cluster states", event.nodesAdded());
assertTrue("Nodes should have been changed between cluster states", event.nodesChanged());
// test when nodes both added and removed between cluster states
// here we reuse the newState from the previous run which already added extra nodes
newState = nextState(newState, randomBoolean(), Collections.emptyList(), Collections.emptyList(), 1);
event = new ClusterChangedEvent("_na_", newState, originalState);
assertTrue("Nodes should have been removed between cluster states", event.nodesRemoved());
assertTrue("Nodes should have been added between cluster states", event.nodesAdded());
assertTrue("Nodes should have been changed between cluster states", event.nodesChanged());
}
/**
* Test the routing table changes checks.
*/
public void testRoutingTableChanges() {
final int numNodesInCluster = 3;
final ClusterState originalState = createState(numNodesInCluster, randomBoolean(), initialIndices);
// routing tables and index routing tables are same object
ClusterState newState = ClusterState.builder(originalState).build();
ClusterChangedEvent event = new ClusterChangedEvent("_na_", originalState, newState);
assertFalse("routing tables should be the same object", event.routingTableChanged());
assertFalse("index routing table should be the same object", event.indexRoutingTableChanged(initialIndices.get(0)));
// routing tables and index routing tables aren't same object
newState = createState(numNodesInCluster, randomBoolean(), initialIndices);
event = new ClusterChangedEvent("_na_", originalState, newState);
assertTrue("routing tables should not be the same object", event.routingTableChanged());
assertTrue("index routing table should not be the same object", event.indexRoutingTableChanged(initialIndices.get(0)));
// index routing tables are different because they don't exist
newState = createState(numNodesInCluster, randomBoolean(), initialIndices.subList(1, initialIndices.size()));
event = new ClusterChangedEvent("_na_", originalState, newState);
assertTrue("routing tables should not be the same object", event.routingTableChanged());
assertTrue("index routing table should not be the same object", event.indexRoutingTableChanged(initialIndices.get(0)));
}
// Tests that the indices change list is correct as well as metadata equality when the metadata has changed.
private static void metaDataChangesCheck(final boolean changeClusterUUID) {
final int numNodesInCluster = 3;
for (int i = 0; i < INDICES_CHANGE_NUM_TESTS; i++) {
final ClusterState previousState = createState(numNodesInCluster, randomBoolean(), initialIndices);
final int numAdd = randomIntBetween(0, 5); // add random # of indices to the next cluster state
final int numDel = randomIntBetween(0, initialIndices.size()); // delete random # of indices from the next cluster state
final List<String> addedIndices = addIndices(numAdd);
final List<String> delIndices = delIndices(numDel, initialIndices);
final ClusterState newState = nextState(previousState, changeClusterUUID, addedIndices, delIndices, 0);
final ClusterChangedEvent event = new ClusterChangedEvent("_na_", newState, previousState);
final List<String> addsFromEvent = event.indicesCreated();
final List<String> delsFromEvent = event.indicesDeleted();
Collections.sort(addsFromEvent);
Collections.sort(delsFromEvent);
assertThat(addsFromEvent, equalTo(addedIndices));
assertThat(delsFromEvent, changeClusterUUID ? equalTo(Collections.emptyList()) : equalTo(delIndices));
assertThat(event.metaDataChanged(), equalTo(changeClusterUUID || addedIndices.size() > 0 || delIndices.size() > 0));
}
}
private static ClusterState createSimpleClusterState() {
return ClusterState.builder(TEST_CLUSTER_NAME).build();
}
// Create a basic cluster state with a given set of indices
private static ClusterState createState(final int numNodes, final boolean isLocalMaster, final List<String> indices) {
final MetaData metaData = createMetaData(indices);
return ClusterState.builder(TEST_CLUSTER_NAME)
.nodes(createDiscoveryNodes(numNodes, isLocalMaster))
.metaData(metaData)
.routingTable(createRoutingTable(1, metaData))
.build();
}
// Create a modified cluster state from another one, but with some number of indices added and deleted.
private static ClusterState nextState(final ClusterState previousState, final boolean changeClusterUUID,
final List<String> addedIndices, final List<String> deletedIndices,
final int numNodesToRemove) {
final ClusterState.Builder builder = ClusterState.builder(previousState);
builder.stateUUID(Strings.randomBase64UUID());
final MetaData.Builder metaBuilder = MetaData.builder(previousState.metaData());
if (changeClusterUUID || addedIndices.size() > 0 || deletedIndices.size() > 0) {
// there is some change in metadata cluster state
if (changeClusterUUID) {
metaBuilder.clusterUUID(Strings.randomBase64UUID());
}
for (String index : addedIndices) {
metaBuilder.put(createIndexMetadata(index), true);
}
for (String index : deletedIndices) {
metaBuilder.remove(index);
}
builder.metaData(metaBuilder);
}
if (numNodesToRemove > 0) {
final int discoveryNodesSize = previousState.getNodes().size();
final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(previousState.getNodes());
for (int i = 0; i < numNodesToRemove && i < discoveryNodesSize; i++) {
nodesBuilder.remove(NODE_ID_PREFIX + i);
}
builder.nodes(nodesBuilder);
}
return builder.build();
}
// Create the discovery nodes for a cluster state. For our testing purposes, we want
// the first to be master, the second to be master eligible, the third to be a data node,
// and the remainder can be any kinds of nodes (master eligible, data, or both).
private static DiscoveryNodes createDiscoveryNodes(final int numNodes, final boolean isLocalMaster) {
assert (numNodes >= 3) : "the initial cluster state for event change tests should have a minimum of 3 nodes " +
"so there are a minimum of 2 master nodes for testing master change events.";
final DiscoveryNodes.Builder builder = DiscoveryNodes.builder();
final int localNodeIndex = isLocalMaster ? 0 : randomIntBetween(1, numNodes - 1); // randomly assign the local node if not master
for (int i = 0; i < numNodes; i++) {
final String nodeId = NODE_ID_PREFIX + i;
boolean isMasterEligible = false;
boolean isData = false;
if (i == 0) {
// the master node
builder.masterNodeId(nodeId);
isMasterEligible = true;
} else if (i == 1) {
// the alternate master node
isMasterEligible = true;
} else if (i == 2) {
// we need at least one data node
isData = true;
} else {
// remaining nodes can be anything (except for master)
isMasterEligible = randomBoolean();
isData = randomBoolean();
}
final DiscoveryNode node = newNode(nodeId, isMasterEligible, isData);
builder.put(node);
if (i == localNodeIndex) {
builder.localNodeId(nodeId);
}
}
return builder.build();
}
// Create a new DiscoveryNode
private static DiscoveryNode newNode(final String nodeId, boolean isMasterEligible, boolean isData) {
final Map<String, String> attributes = MapBuilder.<String, String>newMapBuilder()
.put(DiscoveryNode.MASTER_ATTR, isMasterEligible ? "true" : "false")
.put(DiscoveryNode.DATA_ATTR, isData ? "true": "false")
.immutableMap();
return new DiscoveryNode(nodeId, nodeId, DummyTransportAddress.INSTANCE, attributes, Version.CURRENT);
}
// Create the metadata for a cluster state.
private static MetaData createMetaData(final List<String> indices) {
final MetaData.Builder builder = MetaData.builder();
builder.clusterUUID(INITIAL_CLUSTER_ID);
for (String index : indices) {
builder.put(createIndexMetadata(index), true);
}
return builder.build();
}
// Create the index metadata for a given index.
private static IndexMetaData createIndexMetadata(final String index) {
return createIndexMetadata(index, 1);
}
// Create the index metadata for a given index, with the specified version.
private static IndexMetaData createIndexMetadata(final String index, final long version) {
return IndexMetaData.builder(index)
.settings(settings)
.numberOfShards(1)
.numberOfReplicas(0)
.creationDate(System.currentTimeMillis())
.version(version)
.build();
}
// Create the routing table for a cluster state.
private static RoutingTable createRoutingTable(final long version, final MetaData metaData) {
final RoutingTable.Builder builder = RoutingTable.builder().version(version);
for (ObjectCursor<IndexMetaData> cursor : metaData.indices().values()) {
builder.addAsNew(cursor.value);
}
return builder.build();
}
// Create a list of indices to add
private static List<String> addIndices(final int numIndices) {
final List<String> list = new ArrayList<>();
for (int i = 0; i < numIndices; i++) {
list.add("newIdx_" + i);
}
return list;
}
// Create a list of indices to delete from a list that already belongs to a particular cluster state.
private static List<String> delIndices(final int numIndices, final List<String> currIndices) {
final List<String> list = new ArrayList<>();
for (int i = 0; i < numIndices; i++) {
list.add(currIndices.get(i));
}
return list;
}
}
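For orientation, these events are what cluster-state subscribers receive; a minimal hedged consumer using the existing ClusterStateListener interface (the logging class itself is invented):

    class IndexLifecycleLogger implements ClusterStateListener {
        private final ESLogger logger = Loggers.getLogger(IndexLifecycleLogger.class);

        @Override
        public void clusterChanged(ClusterChangedEvent event) {
            if (event.metaDataChanged()) {
                // the same lists exercised by metaDataChangesCheck above
                for (String index : event.indicesCreated()) {
                    logger.info("index created [{}]", index);
                }
                for (String index : event.indicesDeleted()) {
                    logger.info("index deleted [{}]", index);
                }
            }
        }
    }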

View File

@@ -581,8 +581,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
         // restore GC
         masterNodeDisruption.stopDisrupting();
-        ensureStableCluster(3, new TimeValue(DISRUPTION_HEALING_OVERHEAD.millis() + masterNodeDisruption.expectedTimeToHeal().millis()), false,
-                oldNonMasterNodes.get(0));
+        ensureStableCluster(3, new TimeValue(DISRUPTION_HEALING_OVERHEAD.millis() + masterNodeDisruption.expectedTimeToHeal().millis()), false, oldNonMasterNodes.get(0));
 
         // make sure all nodes agree on master
         String newMaster = internalCluster().getMasterName();
@@ -1072,11 +1071,13 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
         assertTrue(client().prepareGet("index", "doc", "1").get().isExists());
     }
 
-    // tests if indices are really deleted even if a master transition inbetween
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/11665")
+    /**
+     * Tests that indices are properly deleted even if there is a master transition in between.
+     * Test for https://github.com/elastic/elasticsearch/issues/11665
+     */
     public void testIndicesDeleted() throws Exception {
         configureUnicastCluster(3, null, 2);
-        InternalTestCluster.Async<List<String>> masterNodes= internalCluster().startMasterOnlyNodesAsync(2);
+        InternalTestCluster.Async<List<String>> masterNodes = internalCluster().startMasterOnlyNodesAsync(2);
         InternalTestCluster.Async<String> dataNode = internalCluster().startDataOnlyNodeAsync();
         dataNode.get();
         masterNodes.get();

View File

@@ -1,59 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.http.netty;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.http.HttpTransportSettings;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.elasticsearch.test.ESIntegTestCase.Scope;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.Matchers.arrayWithSize;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.instanceOf;
@ClusterScope(scope = Scope.SUITE, numDataNodes = 1)
public class HttpPublishPortIT extends ESIntegTestCase {
@Override
protected Settings nodeSettings(int nodeOrdinal) {
return Settings.settingsBuilder()
.put(super.nodeSettings(nodeOrdinal))
.put(NetworkModule.HTTP_ENABLED.getKey(), true)
.put(HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT.getKey(), 9080)
.build();
}
public void testHttpPublishPort() throws Exception {
NodesInfoResponse response = client().admin().cluster().prepareNodesInfo().clear().setHttp(true).get();
assertThat(response.getNodes(), arrayWithSize(greaterThanOrEqualTo(1)));
NodeInfo nodeInfo = response.getNodes()[0];
BoundTransportAddress address = nodeInfo.getHttp().address();
assertThat(address.publishAddress(), instanceOf(InetSocketTransportAddress.class));
InetSocketTransportAddress publishAddress = (InetSocketTransportAddress) address.publishAddress();
assertThat(publishAddress.address().getPort(), is(9080));
}
}

View File

@@ -0,0 +1,91 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.http.netty;
import org.elasticsearch.common.network.NetworkUtils;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.http.BindHttpException;
import org.elasticsearch.http.HttpTransportSettings;
import org.elasticsearch.test.ESTestCase;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.List;
import static java.net.InetAddress.getByName;
import static java.util.Arrays.asList;
import static org.elasticsearch.http.netty.NettyHttpServerTransport.resolvePublishPort;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
public class NettyHttpPublishPortTests extends ESTestCase {
public void testHttpPublishPort() throws Exception {
int boundPort = randomIntBetween(9000, 9100);
int otherBoundPort = randomIntBetween(9200, 9300);
int publishPort = resolvePublishPort(Settings.builder().put(HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT.getKey(), 9080).build(),
randomAddresses(), getByName("127.0.0.2"));
assertThat("Publish port should be explicitly set to 9080", publishPort, equalTo(9080));
publishPort = resolvePublishPort(Settings.EMPTY, asList(address("127.0.0.1", boundPort), address("127.0.0.2", otherBoundPort)),
getByName("127.0.0.1"));
assertThat("Publish port should be derived from matched address", publishPort, equalTo(boundPort));
publishPort = resolvePublishPort(Settings.EMPTY, asList(address("127.0.0.1", boundPort), address("127.0.0.2", boundPort)),
getByName("127.0.0.3"));
assertThat("Publish port should be derived from unique port of bound addresses", publishPort, equalTo(boundPort));
try {
resolvePublishPort(Settings.EMPTY, asList(address("127.0.0.1", boundPort), address("127.0.0.2", otherBoundPort)),
getByName("127.0.0.3"));
fail("Expected BindHttpException as publish_port not specified and non-unique port of bound addresses");
} catch (BindHttpException e) {
assertThat(e.getMessage(), containsString("Failed to auto-resolve http publish port"));
}
publishPort = resolvePublishPort(Settings.EMPTY, asList(address("0.0.0.0", boundPort), address("127.0.0.2", otherBoundPort)),
getByName("127.0.0.1"));
assertThat("Publish port should be derived from matching wildcard address", publishPort, equalTo(boundPort));
if (NetworkUtils.SUPPORTS_V6) {
publishPort = resolvePublishPort(Settings.EMPTY, asList(address("0.0.0.0", boundPort), address("127.0.0.2", otherBoundPort)),
getByName("::1"));
assertThat("Publish port should be derived from matching wildcard address", publishPort, equalTo(boundPort));
}
}
private InetSocketTransportAddress address(String host, int port) throws UnknownHostException {
return new InetSocketTransportAddress(getByName(host), port);
}
private InetSocketTransportAddress randomAddress() throws UnknownHostException {
return address("127.0.0." + randomIntBetween(1, 100), randomIntBetween(9200, 9300));
}
private List<InetSocketTransportAddress> randomAddresses() throws UnknownHostException {
List<InetSocketTransportAddress> addresses = new ArrayList<>();
for (int i = 0; i < randomIntBetween(1, 5); i++) {
addresses.add(randomAddress());
}
return addresses;
}
}

View File

@ -19,8 +19,10 @@
package org.elasticsearch.ingest; package org.elasticsearch.ingest;
import org.elasticsearch.cluster.DiffableUtils;
import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentFactory;
@ -32,6 +34,10 @@ import java.io.IOException;
import java.util.HashMap; import java.util.HashMap;
import java.util.Map; import java.util.Map;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.not;
public class IngestMetadataTests extends ESTestCase { public class IngestMetadataTests extends ESTestCase {
public void testFromXContent() throws IOException { public void testFromXContent() throws IOException {
@ -61,4 +67,63 @@ public class IngestMetadataTests extends ESTestCase {
assertEquals(pipeline.getConfigAsMap(), m.getPipelines().get("1").getConfigAsMap()); assertEquals(pipeline.getConfigAsMap(), m.getPipelines().get("1").getConfigAsMap());
assertEquals(pipeline2.getConfigAsMap(), m.getPipelines().get("2").getConfigAsMap()); assertEquals(pipeline2.getConfigAsMap(), m.getPipelines().get("2").getConfigAsMap());
} }
+    public void testDiff() throws Exception {
+        BytesReference pipelineConfig = new BytesArray("{}");
+        Map<String, PipelineConfiguration> pipelines = new HashMap<>();
+        pipelines.put("1", new PipelineConfiguration("1", pipelineConfig));
+        pipelines.put("2", new PipelineConfiguration("2", pipelineConfig));
+        IngestMetadata ingestMetadata1 = new IngestMetadata(pipelines);
+        pipelines = new HashMap<>();
+        pipelines.put("1", new PipelineConfiguration("1", pipelineConfig));
+        pipelines.put("3", new PipelineConfiguration("3", pipelineConfig));
+        pipelines.put("4", new PipelineConfiguration("4", pipelineConfig));
+        IngestMetadata ingestMetadata2 = new IngestMetadata(pipelines);
+        IngestMetadata.IngestMetadataDiff diff = (IngestMetadata.IngestMetadataDiff) ingestMetadata2.diff(ingestMetadata1);
+        assertThat(((DiffableUtils.MapDiff) diff.pipelines).getDeletes().size(), equalTo(1));
+        assertThat(((DiffableUtils.MapDiff) diff.pipelines).getDeletes().get(0), equalTo("2"));
+        assertThat(((DiffableUtils.MapDiff) diff.pipelines).getUpserts().size(), equalTo(2));
+        assertThat(((DiffableUtils.MapDiff) diff.pipelines).getUpserts().containsKey("3"), is(true));
+        assertThat(((DiffableUtils.MapDiff) diff.pipelines).getUpserts().containsKey("4"), is(true));
+        IngestMetadata endResult = (IngestMetadata) diff.apply(ingestMetadata2);
+        assertThat(endResult, not(equalTo(ingestMetadata1)));
+        assertThat(endResult.getPipelines().size(), equalTo(3));
+        assertThat(endResult.getPipelines().get("1"), equalTo(new PipelineConfiguration("1", pipelineConfig)));
+        assertThat(endResult.getPipelines().get("3"), equalTo(new PipelineConfiguration("3", pipelineConfig)));
+        assertThat(endResult.getPipelines().get("4"), equalTo(new PipelineConfiguration("4", pipelineConfig)));
+        pipelines = new HashMap<>();
+        pipelines.put("1", new PipelineConfiguration("1", new BytesArray("{}")));
+        pipelines.put("2", new PipelineConfiguration("2", new BytesArray("{}")));
+        IngestMetadata ingestMetadata3 = new IngestMetadata(pipelines);
+        diff = (IngestMetadata.IngestMetadataDiff) ingestMetadata3.diff(ingestMetadata1);
+        assertThat(((DiffableUtils.MapDiff) diff.pipelines).getDeletes().size(), equalTo(0));
+        assertThat(((DiffableUtils.MapDiff) diff.pipelines).getUpserts().size(), equalTo(0));
+        endResult = (IngestMetadata) diff.apply(ingestMetadata3);
+        assertThat(endResult, equalTo(ingestMetadata1));
+        assertThat(endResult.getPipelines().size(), equalTo(2));
+        assertThat(endResult.getPipelines().get("1"), equalTo(new PipelineConfiguration("1", pipelineConfig)));
+        assertThat(endResult.getPipelines().get("2"), equalTo(new PipelineConfiguration("2", pipelineConfig)));
+        pipelines = new HashMap<>();
+        pipelines.put("1", new PipelineConfiguration("1", new BytesArray("{}")));
+        pipelines.put("2", new PipelineConfiguration("2", new BytesArray("{\"key\" : \"value\"}")));
+        IngestMetadata ingestMetadata4 = new IngestMetadata(pipelines);
+        diff = (IngestMetadata.IngestMetadataDiff) ingestMetadata4.diff(ingestMetadata1);
+        assertThat(((DiffableUtils.MapDiff) diff.pipelines).getDiffs().size(), equalTo(1));
+        assertThat(((DiffableUtils.MapDiff) diff.pipelines).getDiffs().containsKey("2"), is(true));
+        endResult = (IngestMetadata) diff.apply(ingestMetadata4);
+        assertThat(endResult, not(equalTo(ingestMetadata1)));
+        assertThat(endResult.getPipelines().size(), equalTo(2));
+        assertThat(endResult.getPipelines().get("1"), equalTo(new PipelineConfiguration("1", pipelineConfig)));
+        assertThat(endResult.getPipelines().get("2"), equalTo(new PipelineConfiguration("2", new BytesArray("{\"key\" : \"value\"}"))));
+    }
 }

View File

@@ -0,0 +1,110 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.transport.netty;
import org.elasticsearch.common.network.NetworkUtils;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.transport.BindTransportException;
import org.elasticsearch.transport.TransportSettings;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.List;
import static java.net.InetAddress.getByName;
import static java.util.Arrays.asList;
import static org.elasticsearch.transport.netty.NettyTransport.resolvePublishPort;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
public class NettyPublishPortTests extends ESTestCase {
public void testPublishPort() throws Exception {
int boundPort = randomIntBetween(9000, 9100);
int otherBoundPort = randomIntBetween(9200, 9300);
boolean useProfile = randomBoolean();
final String profile;
final Settings settings;
final Settings profileSettings;
if (useProfile) {
profile = "some_profile";
settings = randomBoolean() ? Settings.EMPTY : Settings.builder().put(TransportSettings.PUBLISH_PORT.getKey(), 9081).build();
profileSettings = Settings.builder().put("publish_port", 9080).build();
} else {
profile = TransportSettings.DEFAULT_PROFILE;
settings = Settings.builder().put(TransportSettings.PUBLISH_PORT.getKey(), 9081).build();
profileSettings = randomBoolean() ? Settings.EMPTY : Settings.builder().put("publish_port", 9080).build();
}
int publishPort = resolvePublishPort(profile, settings, profileSettings,
randomAddresses(), getByName("127.0.0.2"));
assertThat("Publish port should be explicitly set", publishPort, equalTo(useProfile ? 9080 : 9081));
publishPort = resolvePublishPort(profile, Settings.EMPTY, Settings.EMPTY,
asList(address("127.0.0.1", boundPort), address("127.0.0.2", otherBoundPort)),
getByName("127.0.0.1"));
assertThat("Publish port should be derived from matched address", publishPort, equalTo(boundPort));
publishPort = resolvePublishPort(profile, Settings.EMPTY, Settings.EMPTY,
asList(address("127.0.0.1", boundPort), address("127.0.0.2", boundPort)),
getByName("127.0.0.3"));
assertThat("Publish port should be derived from unique port of bound addresses", publishPort, equalTo(boundPort));
try {
resolvePublishPort(profile, Settings.EMPTY, Settings.EMPTY,
asList(address("127.0.0.1", boundPort), address("127.0.0.2", otherBoundPort)),
getByName("127.0.0.3"));
fail("Expected BindTransportException as publish_port not specified and non-unique port of bound addresses");
} catch (BindTransportException e) {
assertThat(e.getMessage(), containsString("Failed to auto-resolve publish port"));
}
publishPort = resolvePublishPort(profile, Settings.EMPTY, Settings.EMPTY,
asList(address("0.0.0.0", boundPort), address("127.0.0.2", otherBoundPort)),
getByName("127.0.0.1"));
assertThat("Publish port should be derived from matching wildcard address", publishPort, equalTo(boundPort));
if (NetworkUtils.SUPPORTS_V6) {
publishPort = resolvePublishPort(profile, Settings.EMPTY, Settings.EMPTY,
asList(address("0.0.0.0", boundPort), address("127.0.0.2", otherBoundPort)),
getByName("::1"));
assertThat("Publish port should be derived from matching wildcard address", publishPort, equalTo(boundPort));
}
}
private InetSocketAddress address(String host, int port) throws UnknownHostException {
return new InetSocketAddress(getByName(host), port);
}
private InetSocketAddress randomAddress() throws UnknownHostException {
return address("127.0.0." + randomIntBetween(1, 100), randomIntBetween(9200, 9300));
}
private List<InetSocketAddress> randomAddresses() throws UnknownHostException {
List<InetSocketAddress> addresses = new ArrayList<>();
for (int i = 0; i < randomIntBetween(1, 5); i++) {
addresses.add(randomAddress());
}
return addresses;
}
}
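For reference, the resolution order this test exercises can be sketched as follows; this is a simplified stand-in for `NettyTransport.resolvePublishPort` (signature and structure assumed, not the production code): an explicit publish port wins, then the port of a bound or wildcard address matching the publish host, then the single port shared by all bound addresses, otherwise the bind fails.

// Simplified sketch of the publish-port resolution rules exercised above.
static int resolvePublishPortSketch(int explicitPublishPort, List<InetSocketAddress> boundAddresses,
        InetAddress publishInetAddress) {
    if (explicitPublishPort > 0) {
        return explicitPublishPort; // an explicit publish_port setting always wins
    }
    Set<Integer> ports = new HashSet<>();
    for (InetSocketAddress boundAddress : boundAddresses) {
        InetAddress boundInetAddress = boundAddress.getAddress();
        if (boundInetAddress.isAnyLocalAddress() || boundInetAddress.equals(publishInetAddress)) {
            return boundAddress.getPort(); // a bound (or wildcard) address matches the publish host
        }
        ports.add(boundAddress.getPort());
    }
    if (ports.size() == 1) {
        return ports.iterator().next(); // all bound addresses share one unique port
    }
    throw new IllegalStateException("Failed to auto-resolve publish port, multiple bound ports");
}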

View File

@ -9,8 +9,12 @@ cluster, or individual indices.
% curl 192.168.56.10:9200/_cat/indices
green wiki1 3 0 10000 331 168.5mb 168.5mb
green wiki2 3 0 428 0 8mb 8mb
% curl 192.168.56.10:9200/_cat/count
1384314124582 19:42:04 10428
% curl 192.168.56.10:9200/_cat/count/wiki2
1384314139815 19:42:19 428
--------------------------------------------------
NOTE: The document count indicates the number of live documents and does not include deleted documents which have not yet been cleaned up by the merge process.

View File

@ -85,6 +85,7 @@ k0zy 192.168.56.10 9300 {version} m
|`pid` |`p` |No |Process ID |13061
|`ip` |`i` |Yes |IP address |127.0.1.1
|`port` |`po` |No |Bound transport port |9300
|`http_address` |`http` |No |Bound HTTP address |127.0.0.1:9200
|`version` |`v` |No |Elasticsearch version |{version}
|`build` |`b` |No |Elasticsearch Build hash |5c03844
|`jdk` |`j` |No |Running Java version |1.8.0

View File

@ -0,0 +1,461 @@
[[docs-reindex]]
== Reindex API
`_reindex`'s most basic form just copies documents from one index to another.
This will copy documents from `twitter` into `new_twitter`:
[source,js]
--------------------------------------------------
POST /_reindex
{
"source": {
"index": "twitter"
},
"dest": {
"index": "new_twitter"
}
}
--------------------------------------------------
// AUTOSENSE
That will return something like this:
[source,js]
--------------------------------------------------
{
"took" : 639,
"updated": 112,
"batches": 130,
"version_conflicts": 0,
"failures" : [ ],
"created": 12344
}
--------------------------------------------------
Just like `_update_by_query`, `_reindex` gets a snapshot of the source index
but its target must be a **different** index so version conflicts are unlikely.
The `dest` element can be configured like the index API to control optimistic
concurrency control. Just leaving out `version_type` (as above) or setting it
to `internal` will cause Elasticsearch to blindly dump documents into the
target, overwriting any that happen to have the same type and id:
[source,js]
--------------------------------------------------
POST /_reindex
{
"source": {
"index": "twitter"
},
"dest": {
"index": "new_twitter",
"version_type": "internal"
}
}
--------------------------------------------------
// AUTOSENSE
Setting `version_type` to `external` will cause Elasticsearch to preserve the
`version` from the source, create any documents that are missing, and update
any documents that have an older version in the destination index than they do
in the source index:
[source,js]
--------------------------------------------------
POST /_reindex
{
"source": {
"index": "twitter"
},
"dest": {
"index": "new_twitter",
"version_type": "external"
}
}
--------------------------------------------------
// AUTOSENSE
Setting `op_type` to `create` will cause `_reindex` to only create missing
documents in the target index. All existing documents will cause a version
conflict:
[source,js]
--------------------------------------------------
POST /_reindex
{
"source": {
"index": "twitter"
},
"dest": {
"index": "new_twitter",
"op_type": "create"
}
}
--------------------------------------------------
// AUTOSENSE
By default version conflicts abort the `_reindex` process but you can just
count them by setting `"conflicts": "proceed"` in the request body:
[source,js]
--------------------------------------------------
POST /_reindex
{
"conflicts": "proceed",
"source": {
"index": "twitter"
},
"dest": {
"index": "new_twitter",
"op_type": "create"
}
}
--------------------------------------------------
// AUTOSENSE
You can limit the documents by adding a type to the `source` or by adding a
query. This will only copy `tweet`s made by `kimchy` into `new_twitter`:
[source,js]
--------------------------------------------------
POST /_reindex
{
"source": {
"index": "twitter",
"type": "tweet",
"query": {
"term": {
"user": "kimchy"
}
}
},
"dest": {
"index": "new_twitter"
}
}
--------------------------------------------------
// AUTOSENSE
`index` and `type` in `source` can both be lists, allowing you to copy from
lots of sources in one request. This will copy documents from the `tweet` and
`post` types in the `twitter` and `blog` indices. It'd include the `post` type in
the `twitter` index and the `tweet` type in the `blog` index. If you want to be
more specific you'll need to use the `query`. It also makes no effort to handle
id collisions. The target index will remain valid but it's not easy to predict
which document will survive because the iteration order isn't well defined.
Just avoid that situation, ok?
[source,js]
--------------------------------------------------
POST /_reindex
{
"source": {
"index": ["twitter", "blog"],
"type": ["tweet", "post"]
},
"index": {
"index": "all_together"
}
}
--------------------------------------------------
// AUTOSENSE
It's also possible to limit the number of processed documents by setting
`size`. This will only copy a single document from `twitter` to
`new_twitter`:
[source,js]
--------------------------------------------------
POST /_reindex
{
"size": 1,
"source": {
"index": "twitter"
},
"dest": {
"index": "new_twitter"
}
}
--------------------------------------------------
// AUTOSENSE
If you want a particular set of documents from the `twitter` index you'll
need to sort. Sorting makes the scroll less efficient but in some contexts
it's worth it. If possible, prefer a more selective query to `size` and `sort`.
This will copy 10000 documents from `twitter` into `new_twitter`:
[source,js]
--------------------------------------------------
POST /_reindex
{
"size": 10000,
"source": {
"index": "twitter",
"sort": { "date": "desc" }
},
"dest": {
"index": "new_twitter"
}
}
--------------------------------------------------
// AUTOSENSE
Like `_update_by_query`, `_reindex` supports a script that modifies the
document. Unlike `_update_by_query`, the script is allowed to modify the
document's metadata. This example bumps the version of the source document:
[source,js]
--------------------------------------------------
POST /_reindex
{
"source": {
"index": "twitter",
},
"dest": {
"index": "new_twitter",
"version_type": "external"
}
"script": {
"internal": "if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}"
}
}
--------------------------------------------------
// AUTOSENSE
Think of the possibilities! Just be careful! With great power.... You can
change:
* "_id"
* "_type"
* "_index"
* "_version"
* "_routing"
* "_parent"
* "_timestamp"
* "_ttl"
Setting `_version` to `null` or clearing it from the `ctx` map is just like not
sending the version in an indexing request. It will cause that document to be
overwritten in the target index regardless of the version on the target or the
version type you use in the `_reindex` request.
By default if `_reindex` sees a document with routing then the routing is
preserved unless it's changed by the script. You can set `routing` on the
`dest` request to change this:
`keep`::
Sets the routing on the bulk request sent for each match to the routing on
the match. The default.
`discard`::
Sets the routing on the bulk request sent for each match to null.
`=<some text>`::
Sets the routing on the bulk request sent for each match to all text after
the `=`.
For example, you can use the following request to copy all documents from
the `source` index with the company name `cat` into the `dest` index with
routing set to `cat`.
[source,js]
--------------------------------------------------
POST /_reindex
{
"source": {
"index": "source"
"query": {
"match": {
"company": "cat"
}
}
}
"index": {
"index": "dest",
"routing": "=cat"
}
}
--------------------------------------------------
// AUTOSENSE
[float]
=== URL Parameters
In addition to the standard parameters like `pretty`, the Reindex API also
supports `refresh`, `wait_for_completion`, `consistency`, and `timeout`.
Sending the `refresh` url parameter will cause all indices to which the request
wrote to be refreshed. This is different from the Index API's `refresh`
parameter, which causes just the shard that received the new data to be refreshed.
If the request contains `wait_for_completion=false` then Elasticsearch will
perform some preflight checks, launch the request, and then return a `task`
which can be used with <<docs-reindex-task-api,Tasks APIs>> to cancel or get
the status of the task. For now, once the request is finished the task is gone
and the only place to look for the ultimate result of the task is in the
Elasticsearch log file. This will be fixed soon.
`consistency` controls how many copies of a shard must respond to each write
request. `timeout` controls how long each write request waits for unavailable
shards to become available. Both work exactly how they work in the
{ref}/docs-bulk.html[Bulk API].
[float]
=== Response body
The JSON response looks like this:
[source,js]
--------------------------------------------------
{
"took" : 639,
"updated": 0,
"created": 123,
"batches": 1,
"version_conflicts": 2,
"failures" : [ ]
}
--------------------------------------------------
`took`::
The number of milliseconds from start to end of the whole operation.
`updated`::
The number of documents that were successfully updated.
`created`::
The number of documents that were successfully created.
`batches`::
The number of scroll responses pulled back by the reindex.
`version_conflicts`::
The number of version conflicts that reindex hit.
`failures`::
Array of all indexing failures. If this is non-empty then the request aborted
because of those failures. See `conflicts` for how to prevent version conflicts
from aborting the operation.
[float]
[[docs-reindex-task-api]]
=== Works with the Task API
While Reindex is running you can fetch its status using the
{ref}/task/list.html[Task List APIs]:
[source,js]
--------------------------------------------------
POST /_tasks/?pretty&detailed=true&actions=*reindex
--------------------------------------------------
// AUTOSENSE
The response looks like:
[source,js]
--------------------------------------------------
{
"nodes" : {
"r1A2WoRbTwKZ516z6NEs5A" : {
"name" : "Tyrannus",
"transport_address" : "127.0.0.1:9300",
"host" : "127.0.0.1",
"ip" : "127.0.0.1:9300",
"attributes" : {
"testattr" : "test",
"portsfile" : "true"
},
"tasks" : [ {
"node" : "r1A2WoRbTwKZ516z6NEs5A",
"id" : 36619,
"type" : "transport",
"action" : "indices:data/write/reindex",
"status" : { <1>
"total" : 6154,
"updated" : 3500,
"created" : 0,
"deleted" : 0,
"batches" : 36,
"version_conflicts" : 0,
"noops" : 0
},
"description" : ""
} ]
}
}
}
--------------------------------------------------
<1> This object contains the actual status. It is just like the response JSON
with the important addition of the `total` field. `total` is the total number
of operations that the reindex expects to perform. You can estimate the
progress by adding the `updated`, `created`, and `deleted` fields. The request
will finish when their sum is equal to the `total` field.
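For example, a client polling this status could estimate completion with simple arithmetic; a sketch, where `total`, `updated`, `created`, and `deleted` stand for the fields of the status object above:

[source,java]
--------------------------------------------------
// Sketch: estimate task progress from the status fields described above.
long done = updated + created + deleted;     // operations completed so far
double fractionDone = (double) done / total; // reaches 1.0 when the request finishes
--------------------------------------------------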
[float]
=== Examples
==== Change the name of a field
`_reindex` can be used to build a copy of an index with renamed fields. Say you
create an index containing documents that look like this:
[source,js]
--------------------------------------------------
POST test/test/1?refresh&pretty
{
"text": "words words",
"flag": "foo"
}
--------------------------------------------------
// AUTOSENSE
But you don't like the name `flag` and want to replace it with `tag`.
`_reindex` can create the other index for you:
[source,js]
--------------------------------------------------
POST _reindex?pretty
{
"source": {
"index": "test"
},
"dest": {
"index": "test2"
},
"script": {
"inline": "ctx._source.tag = ctx._source.remove(\"flag\")"
}
}
--------------------------------------------------
// AUTOSENSE
Now you can get the new document:
[source,js]
--------------------------------------------------
GET test2/test/1?pretty
--------------------------------------------------
// AUTOSENSE
and it'll look like:
[source,js]
--------------------------------------------------
{
"text": "words words",
"tag": "foo"
}
--------------------------------------------------
Or you can search by `tag` or whatever you want.

View File

@ -0,0 +1,358 @@
[[docs-update-by-query]]
== Update By Query API
The simplest usage of `_update_by_query` just performs an update on every
document in the index without changing the source. This is useful to
<<picking-up-a-new-property,pick up a new property>> or some other online
mapping change. Here is the API:
[source,js]
--------------------------------------------------
POST /twitter/_update_by_query?conflicts=proceed
--------------------------------------------------
// AUTOSENSE
That will return something like this:
[source,js]
--------------------------------------------------
{
"took" : 639,
"updated": 1235,
"batches": 13,
"version_conflicts": 2,
"failures" : [ ]
}
--------------------------------------------------
`_update_by_query` gets a snapshot of the index when it starts and indexes what
it finds using `internal` versioning. That means that you'll get a version
conflict if the document changes between the time when the snapshot was taken
and when the index request is processed. When the versions match the document
is updated and the version number is incremented.
All update and query failures cause the `_update_by_query` to abort and are
returned in the `failures` of the response. The updates that have been
performed still stick. In other words, the process is not rolled back, only
aborted. While the first failure causes the abort all failures that are
returned by the failing bulk request are returned in the `failures` element so
it's possible for there to be quite a few.
If you want to simply count version conflicts rather than cause the
`_update_by_query` to abort, you can set `conflicts=proceed` on the url or `"conflicts": "proceed"`
in the request body. The first example does this because it is just trying to
pick up an online mapping change and a version conflict simply means that the
conflicting document was updated between the start of the `_update_by_query`
and the time when it attempted to update the document. This is fine because
that update will have picked up the online mapping update.
Back to the API format, you can limit `_update_by_query` to a single type. This
will only update `tweet`s from the `twitter` index:
[source,js]
--------------------------------------------------
POST /twitter/tweet/_update_by_query?conflicts=proceed
--------------------------------------------------
// AUTOSENSE
You can also limit `_update_by_query` using the
{ref}/query-dsl.html[Query DSL]. This will update all documents from the
`twitter` index for the user `kimchy`:
[source,js]
--------------------------------------------------
POST /twitter/_update_by_query?conflicts=proceed
{
"query": { <1>
"term": {
"user": "kimchy"
}
}
}
--------------------------------------------------
// AUTOSENSE
<1> The query must be passed as a value to the `query` key, in the same
way as the {ref}/search-search.html[Search API]. You can also use the `q`
parameter in the same way as the Search API.
So far we've only been updating documents without changing their source. That
is genuinely useful for things like
<<picking-up-a-new-property,picking up new properties>> but it's only half the
fun. `_update_by_query` supports a `script` object to update the document. This
will increment the `likes` field on all of kimchy's tweets:
[source,js]
--------------------------------------------------
POST /twitter/_update_by_query
{
"script": {
"inline": "ctx._source.likes++"
},
"query": {
"term": {
"user": "kimchy"
}
}
}
--------------------------------------------------
// AUTOSENSE
Just as in the {ref}/docs-update.html[Update API] you can set `ctx.op = "noop"` if
your script decides that it doesn't have to make any changes. That will cause
`_update_by_query` to omit that document from its updates. Setting `ctx.op` to
anything else is an error. If you want to delete by a query you can use the
<<plugins-delete-by-query,Delete by Query Plugin>> instead. Setting any other
field in `ctx` is an error.
Note that we stopped specifying `conflicts=proceed`. In this case we want a
version conflict to abort the process so we can handle the failure.
This API doesn't allow you to move the documents it touches, just modify their
source. This is intentional! We've made no provisions for removing the document
from its original location.
It's also possible to do this whole thing on multiple indices and multiple
types at once, just like the search API:
[source,js]
--------------------------------------------------
POST /twitter,blog/tweet,post/_update_by_query
--------------------------------------------------
// AUTOSENSE
If you provide `routing` then the routing is copied to the scroll query,
limiting the process to the shards that match that routing value:
[source,js]
--------------------------------------------------
POST /twitter/_update_by_query?routing=1
--------------------------------------------------
// AUTOSENSE
By default `_update_by_query` uses scroll batches of 100. You can change the
batch size with the `scroll_size` URL parameter:
[source,js]
--------------------------------------------------
POST /twitter/_update_by_query?scroll_size=1000
--------------------------------------------------
// AUTOSENSE
[float]
=== URL Parameters
In addition to the standard parameters like `pretty`, the Update By Query API
also supports `refresh`, `wait_for_completion`, `consistency`, and `timeout`.
Sending the `refresh` url parameter will update all shards in the index being
updated when the request completes. This is different from the Index API's
`refresh` parameter, which causes just the shard that received the new data to
be refreshed.
If the request contains `wait_for_completion=false` then Elasticsearch will
perform some preflight checks, launch the request, and then return a `task`
which can be used with <<docs-update-by-query-task-api,Tasks APIs>> to cancel
or get the status of the task. For now, once the request is finished the task
is gone and the only place to look for the ultimate result of the task is in
the Elasticsearch log file. This will be fixed soon.
`consistency` controls how many copies of a shard must respond to each write
request. `timeout` controls how long each write request waits for unavailable
shards to become available. Both work exactly how they work in the
{ref}/docs-bulk.html[Bulk API].
[float]
=== Response body
The JSON response looks like this:
[source,js]
--------------------------------------------------
{
"took" : 639,
"updated": 0,
"batches": 1,
"version_conflicts": 2,
"failures" : [ ]
}
--------------------------------------------------
`took`::
The number of milliseconds from start to end of the whole operation.
`updated`::
The number of documents that were successfully updated.
`batches`::
The number of scroll responses pulled back by the update by query.
`version_conflicts`::
The number of version conflicts that the update by query hit.
`failures`::
Array of all indexing failures. If this is non-empty then the request aborted
because of those failures. See `conflicts` for how to prevent version conflicts
from aborting the operation.
[float]
[[docs-update-by-query-task-api]]
=== Works with the Task API
While Update By Query is running you can fetch its status using the
{ref}/task/list.html[Task List APIs]:
[source,js]
--------------------------------------------------
POST /_tasks/?pretty&detailed=true&actions=*byquery
--------------------------------------------------
// AUTOSENSE
The response looks like:
[source,js]
--------------------------------------------------
{
"nodes" : {
"r1A2WoRbTwKZ516z6NEs5A" : {
"name" : "Tyrannus",
"transport_address" : "127.0.0.1:9300",
"host" : "127.0.0.1",
"ip" : "127.0.0.1:9300",
"attributes" : {
"testattr" : "test",
"portsfile" : "true"
},
"tasks" : [ {
"node" : "r1A2WoRbTwKZ516z6NEs5A",
"id" : 36619,
"type" : "transport",
"action" : "indices:data/write/update/byquery",
"status" : { <1>
"total" : 6154,
"updated" : 3500,
"created" : 0,
"deleted" : 0,
"batches" : 36,
"version_conflicts" : 0,
"noops" : 0
},
"description" : ""
} ]
}
}
}
--------------------------------------------------
<1> This object contains the actual status. It is just like the response JSON
with the important addition of the `total` field. `total` is the total number
of operations that the update by query expects to perform. You can estimate the
progress by adding the `updated`, `created`, and `deleted` fields. The request
will finish when their sum is equal to the `total` field.
[float]
=== Examples
[[picking-up-a-new-property]]
==== Pick up a new property
Say you created an index without dynamic mapping, filled it with data, and then
added a mapping value to pick up more fields from the data:
[source,js]
--------------------------------------------------
PUT test
{
"mappings": {
"test": {
"dynamic": false, <1>
"properties": {
"text": {"type": "string"}
}
}
}
}
POST test/test?refresh
{
"text": "words words",
"flag": "bar"
}
POST test/test?refresh
{
"text": "words words",
"flag": "foo"
}
PUT test/_mapping/test <2>
{
"properties": {
"text": {"type": "string"},
"flag": {"type": "string", "analyzer": "keyword"}
}
}
--------------------------------------------------
// AUTOSENSE
<1> This means that new fields won't be indexed, just stored in `_source`.
<2> This updates the mapping to add the new `flag` field. To pick up the new
field you have to reindex all documents with it.
Searching for the data won't find anything:
[source,js]
--------------------------------------------------
POST test/_search?filter_path=hits.total
{
"query": {
"match": {
"flag": "foo"
}
}
}
--------------------------------------------------
// AUTOSENSE
[source,js]
--------------------------------------------------
{
"hits" : {
"total" : 0
}
}
--------------------------------------------------
But you can issue an `_update_by_query` request to pick up the new mapping:
[source,js]
--------------------------------------------------
POST test/_update_by_query?refresh&conflicts=proceed
POST test/_search?filter_path=hits.total
{
"query": {
"match": {
"flag": "foo"
}
}
}
--------------------------------------------------
// AUTOSENSE
[source,js]
--------------------------------------------------
{
"hits" : {
"total" : 1
}
}
--------------------------------------------------
Hurray! You can do the exact same thing when adding a field to a multifield.

View File

@ -92,7 +92,7 @@ import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.groovy.GroovyPlugin;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
@ -580,8 +580,8 @@ public class IndicesRequestTests extends ESIntegTestCase {
}
public void testSearchQueryThenFetch() throws Exception {
interceptTransportActions(SearchTransportService.QUERY_ACTION_NAME,
SearchTransportService.FETCH_ID_ACTION_NAME, SearchTransportService.FREE_CONTEXT_ACTION_NAME);
String[] randomIndicesOrAliases = randomIndicesOrAliases();
for (int i = 0; i < randomIndicesOrAliases.length; i++) {
@ -595,14 +595,14 @@ public class IndicesRequestTests extends ESIntegTestCase {
assertThat(searchResponse.getHits().totalHits(), greaterThan(0L));
clearInterceptedActions();
assertSameIndices(searchRequest, SearchTransportService.QUERY_ACTION_NAME, SearchTransportService.FETCH_ID_ACTION_NAME);
//free context messages are not necessarily sent, but if they are, check their indices
assertSameIndicesOptionalRequests(searchRequest, SearchTransportService.FREE_CONTEXT_ACTION_NAME);
}
public void testSearchDfsQueryThenFetch() throws Exception {
interceptTransportActions(SearchTransportService.DFS_ACTION_NAME, SearchTransportService.QUERY_ID_ACTION_NAME,
SearchTransportService.FETCH_ID_ACTION_NAME, SearchTransportService.FREE_CONTEXT_ACTION_NAME);
String[] randomIndicesOrAliases = randomIndicesOrAliases();
for (int i = 0; i < randomIndicesOrAliases.length; i++) {
@ -616,15 +616,15 @@ public class IndicesRequestTests extends ESIntegTestCase {
assertThat(searchResponse.getHits().totalHits(), greaterThan(0L));
clearInterceptedActions();
assertSameIndices(searchRequest, SearchTransportService.DFS_ACTION_NAME, SearchTransportService.QUERY_ID_ACTION_NAME,
SearchTransportService.FETCH_ID_ACTION_NAME);
//free context messages are not necessarily sent, but if they are, check their indices
assertSameIndicesOptionalRequests(searchRequest, SearchTransportService.FREE_CONTEXT_ACTION_NAME);
}
public void testSearchQueryAndFetch() throws Exception {
interceptTransportActions(SearchTransportService.QUERY_FETCH_ACTION_NAME,
SearchTransportService.FREE_CONTEXT_ACTION_NAME);
String[] randomIndicesOrAliases = randomIndicesOrAliases();
for (int i = 0; i < randomIndicesOrAliases.length; i++) {
@ -638,14 +638,14 @@ public class IndicesRequestTests extends ESIntegTestCase {
assertThat(searchResponse.getHits().totalHits(), greaterThan(0L));
clearInterceptedActions();
assertSameIndices(searchRequest, SearchTransportService.QUERY_FETCH_ACTION_NAME);
//free context messages are not necessarily sent, but if they are, check their indices
assertSameIndicesOptionalRequests(searchRequest, SearchTransportService.FREE_CONTEXT_ACTION_NAME);
}
public void testSearchDfsQueryAndFetch() throws Exception {
interceptTransportActions(SearchTransportService.QUERY_QUERY_FETCH_ACTION_NAME,
SearchTransportService.FREE_CONTEXT_ACTION_NAME);
String[] randomIndicesOrAliases = randomIndicesOrAliases();
for (int i = 0; i < randomIndicesOrAliases.length; i++) {
@ -659,9 +659,9 @@ public class IndicesRequestTests extends ESIntegTestCase {
assertThat(searchResponse.getHits().totalHits(), greaterThan(0L));
clearInterceptedActions();
assertSameIndices(searchRequest, SearchTransportService.QUERY_QUERY_FETCH_ACTION_NAME);
//free context messages are not necessarily sent, but if they are, check their indices
assertSameIndicesOptionalRequests(searchRequest, SearchTransportService.FREE_CONTEXT_ACTION_NAME);
}
private static void assertSameIndices(IndicesRequest originalRequest, String... actions) {

View File

@ -0,0 +1,23 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
esplugin {
description 'The Reindex module adds APIs to reindex from one index to another or update documents in place.'
classname 'org.elasticsearch.index.reindex.ReindexPlugin'
}

View File

@ -0,0 +1,411 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.bulk.Retry;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.ClearScrollResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.threadpool.ThreadPool;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import static java.lang.Math.max;
import static java.lang.Math.min;
import static java.util.Collections.emptyList;
import static java.util.Collections.unmodifiableList;
import static org.elasticsearch.action.bulk.BackoffPolicy.exponentialBackoff;
import static org.elasticsearch.common.unit.TimeValue.timeValueNanos;
import static org.elasticsearch.index.reindex.AbstractBulkByScrollRequest.SIZE_ALL_MATCHES;
import static org.elasticsearch.rest.RestStatus.CONFLICT;
import static org.elasticsearch.search.sort.SortBuilders.fieldSort;
/**
* Abstract base for scrolling across a search and executing bulk actions on all
* results.
*/
public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBulkByScrollRequest<Request>, Response> {
/**
* The request for this action. Named mainRequest because we create lots of <code>request</code> variables all representing child
* requests of this mainRequest.
*/
protected final Request mainRequest;
protected final BulkByScrollTask task;
private final AtomicLong startTime = new AtomicLong(-1);
private final AtomicReference<String> scroll = new AtomicReference<>();
private final Set<String> destinationIndices = Collections.newSetFromMap(new ConcurrentHashMap<>());
private final ESLogger logger;
private final Client client;
private final ThreadPool threadPool;
private final SearchRequest firstSearchRequest;
private final ActionListener<Response> listener;
private final Retry retry;
public AbstractAsyncBulkByScrollAction(BulkByScrollTask task, ESLogger logger, Client client, ThreadPool threadPool,
Request mainRequest, SearchRequest firstSearchRequest, ActionListener<Response> listener) {
this.task = task;
this.logger = logger;
this.client = client;
this.threadPool = threadPool;
this.mainRequest = mainRequest;
this.firstSearchRequest = firstSearchRequest;
this.listener = listener;
retry = Retry.on(EsRejectedExecutionException.class).policy(wrapBackoffPolicy(backoffPolicy()));
}
protected abstract BulkRequest buildBulk(Iterable<SearchHit> docs);
protected abstract Response buildResponse(TimeValue took, List<Failure> indexingFailures, List<ShardSearchFailure> searchFailures);
public void start() {
initialSearch();
}
public BulkByScrollTask getTask() {
return task;
}
void initialSearch() {
if (task.isCancelled()) {
finishHim(null);
return;
}
try {
// Default to sorting by _doc if it hasn't been changed.
if (firstSearchRequest.source().sorts() == null) {
firstSearchRequest.source().sort(fieldSort("_doc"));
}
startTime.set(System.nanoTime());
if (logger.isDebugEnabled()) {
logger.debug("executing initial scroll against {}{}",
firstSearchRequest.indices() == null || firstSearchRequest.indices().length == 0 ? "all indices"
: firstSearchRequest.indices(),
firstSearchRequest.types() == null || firstSearchRequest.types().length == 0 ? ""
: firstSearchRequest.types());
}
client.search(firstSearchRequest, new ActionListener<SearchResponse>() {
@Override
public void onResponse(SearchResponse response) {
logger.debug("[{}] documents match query", response.getHits().getTotalHits());
onScrollResponse(response);
}
@Override
public void onFailure(Throwable e) {
finishHim(e);
}
});
} catch (Throwable t) {
finishHim(t);
}
}
/**
* Set the last returned scrollId. Package private for testing.
*/
void setScroll(String scroll) {
this.scroll.set(scroll);
}
void onScrollResponse(SearchResponse searchResponse) {
if (task.isCancelled()) {
finishHim(null);
return;
}
setScroll(searchResponse.getScrollId());
if (searchResponse.getShardFailures() != null && searchResponse.getShardFailures().length > 0) {
startNormalTermination(emptyList(), unmodifiableList(Arrays.asList(searchResponse.getShardFailures())));
return;
}
long total = searchResponse.getHits().totalHits();
if (mainRequest.getSize() > 0) {
total = min(total, mainRequest.getSize());
}
task.setTotal(total);
threadPool.generic().execute(new AbstractRunnable() {
@Override
protected void doRun() throws Exception {
SearchHit[] docs = searchResponse.getHits().getHits();
logger.debug("scroll returned [{}] documents with a scroll id of [{}]", docs.length, searchResponse.getScrollId());
if (docs.length == 0) {
startNormalTermination(emptyList(), emptyList());
return;
}
task.countBatch();
List<SearchHit> docsIterable = Arrays.asList(docs);
if (mainRequest.getSize() != SIZE_ALL_MATCHES) {
// Truncate the docs if we have more than the request size
long remaining = max(0, mainRequest.getSize() - task.getSuccessfullyProcessed());
if (remaining < docs.length) {
docsIterable = docsIterable.subList(0, (int) remaining);
}
}
BulkRequest request = buildBulk(docsIterable);
if (request.requests().isEmpty()) {
/*
* If we noop-ed the entire batch then just skip to the next batch or the BulkRequest would fail validation.
*/
startNextScroll();
return;
}
request.timeout(mainRequest.getTimeout());
request.consistencyLevel(mainRequest.getConsistency());
if (logger.isDebugEnabled()) {
logger.debug("sending [{}] entry, [{}] bulk request", request.requests().size(),
new ByteSizeValue(request.estimatedSizeInBytes()));
}
sendBulkRequest(request);
}
@Override
public void onFailure(Throwable t) {
finishHim(t);
}
});
}
void sendBulkRequest(BulkRequest request) {
if (task.isCancelled()) {
finishHim(null);
return;
}
retry.withAsyncBackoff(client, request, new ActionListener<BulkResponse>() {
@Override
public void onResponse(BulkResponse response) {
onBulkResponse(response);
}
@Override
public void onFailure(Throwable e) {
finishHim(e);
}
});
}
void onBulkResponse(BulkResponse response) {
if (task.isCancelled()) {
finishHim(null);
return;
}
try {
List<Failure> failures = new ArrayList<Failure>();
Set<String> destinationIndicesThisBatch = new HashSet<>();
for (BulkItemResponse item : response) {
if (item.isFailed()) {
recordFailure(item.getFailure(), failures);
continue;
}
switch (item.getOpType()) {
case "index":
case "create":
IndexResponse ir = item.getResponse();
if (ir.isCreated()) {
task.countCreated();
} else {
task.countUpdated();
}
break;
case "delete":
task.countDeleted();
break;
default:
throw new IllegalArgumentException("Unknown op type: " + item.getOpType());
}
// Track the indexes we've seen so we can refresh them if requested
destinationIndicesThisBatch.add(item.getIndex());
}
destinationIndices.addAll(destinationIndicesThisBatch);
if (false == failures.isEmpty()) {
startNormalTermination(unmodifiableList(failures), emptyList());
return;
}
if (mainRequest.getSize() != SIZE_ALL_MATCHES && task.getSuccessfullyProcessed() >= mainRequest.getSize()) {
// We've processed all the requested docs.
startNormalTermination(emptyList(), emptyList());
return;
}
startNextScroll();
} catch (Throwable t) {
finishHim(t);
}
}
void startNextScroll() {
if (task.isCancelled()) {
finishHim(null);
return;
}
SearchScrollRequest request = new SearchScrollRequest();
request.scrollId(scroll.get()).scroll(firstSearchRequest.scroll());
client.searchScroll(request, new ActionListener<SearchResponse>() {
@Override
public void onResponse(SearchResponse response) {
onScrollResponse(response);
}
@Override
public void onFailure(Throwable e) {
finishHim(e);
}
});
}
private void recordFailure(Failure failure, List<Failure> failures) {
if (failure.getStatus() == CONFLICT) {
task.countVersionConflict();
if (false == mainRequest.isAbortOnVersionConflict()) {
return;
}
}
failures.add(failure);
}
void startNormalTermination(List<Failure> indexingFailures, List<ShardSearchFailure> searchFailures) {
if (false == mainRequest.isRefresh()) {
finishHim(null, indexingFailures, searchFailures);
return;
}
RefreshRequest refresh = new RefreshRequest();
refresh.indices(destinationIndices.toArray(new String[destinationIndices.size()]));
client.admin().indices().refresh(refresh, new ActionListener<RefreshResponse>() {
@Override
public void onResponse(RefreshResponse response) {
finishHim(null, indexingFailures, searchFailures);
}
@Override
public void onFailure(Throwable e) {
finishHim(e);
}
});
}
/**
* Finish the request.
*
* @param failure if non null then the request failed catastrophically with this exception
*/
void finishHim(Throwable failure) {
finishHim(failure, emptyList(), emptyList());
}
/**
* Finish the request.
*
* @param failure if non null then the request failed catastrophically with this exception
* @param indexingFailures any indexing failures accumulated during the request
* @param searchFailures any search failures accumulated during the request
*/
void finishHim(Throwable failure, List<Failure> indexingFailures, List<ShardSearchFailure> searchFailures) {
String scrollId = scroll.get();
if (Strings.hasLength(scrollId)) {
/*
* Fire off the clear scroll but don't wait for it to return before
* we send the user their response.
*/
ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
clearScrollRequest.addScrollId(scrollId);
client.clearScroll(clearScrollRequest, new ActionListener<ClearScrollResponse>() {
@Override
public void onResponse(ClearScrollResponse response) {
logger.debug("Freed [{}] contexts", response.getNumFreed());
}
@Override
public void onFailure(Throwable e) {
logger.warn("Failed to clear scroll [" + scrollId + ']', e);
}
});
}
if (failure == null) {
listener.onResponse(buildResponse(timeValueNanos(System.nanoTime() - startTime.get()), indexingFailures, searchFailures));
} else {
listener.onFailure(failure);
}
}
/**
* Build the backoff policy for use with retries. Package private for testing.
*/
BackoffPolicy backoffPolicy() {
return exponentialBackoff(mainRequest.getRetryBackoffInitialTime(), mainRequest.getMaxRetries());
}
/**
* Wraps a backoffPolicy in another policy that counts the number of backoffs acquired.
*/
private BackoffPolicy wrapBackoffPolicy(BackoffPolicy backoffPolicy) {
return new BackoffPolicy() {
@Override
public Iterator<TimeValue> iterator() {
return new Iterator<TimeValue>() {
private final Iterator<TimeValue> delegate = backoffPolicy.iterator();
@Override
public boolean hasNext() {
return delegate.hasNext();
}
@Override
public TimeValue next() {
if (false == delegate.hasNext()) {
return null;
}
task.countRetry();
return delegate.next();
}
};
}
};
}
}
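To make the wiring concrete, here is a minimal hypothetical subclass; `MyRequest` and `MyResponse` are invented for illustration and the `MyResponse` constructor shape is assumed. It deletes every matched document (via `org.elasticsearch.action.delete.DeleteRequest`) and only supplies the two abstract halves, leaving the scroll, bulk, and retry loop above to do the rest:

// Hypothetical subclass for illustration only; not part of this commit.
class DeletingBulkByScrollAction extends AbstractAsyncBulkByScrollAction<MyRequest, MyResponse> {
    DeletingBulkByScrollAction(BulkByScrollTask task, ESLogger logger, Client client, ThreadPool threadPool,
            MyRequest mainRequest, SearchRequest firstSearchRequest, ActionListener<MyResponse> listener) {
        super(task, logger, client, threadPool, mainRequest, firstSearchRequest, listener);
    }

    @Override
    protected BulkRequest buildBulk(Iterable<SearchHit> docs) {
        // Turn each scrolled hit into a delete; the base class batches, retries, and counts.
        BulkRequest bulk = new BulkRequest();
        for (SearchHit doc : docs) {
            bulk.add(new DeleteRequest(doc.index(), doc.type(), doc.id()));
        }
        return bulk;
    }

    @Override
    protected MyResponse buildResponse(TimeValue took, List<Failure> indexingFailures,
            List<ShardSearchFailure> searchFailures) {
        return new MyResponse(took, indexingFailures, searchFailures); // constructor shape assumed
    }
}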

View File

@ -0,0 +1,238 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.index.mapper.internal.IdFieldMapper;
import org.elasticsearch.index.mapper.internal.IndexFieldMapper;
import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
import org.elasticsearch.index.mapper.internal.RoutingFieldMapper;
import org.elasticsearch.index.mapper.internal.SourceFieldMapper;
import org.elasticsearch.index.mapper.internal.TTLFieldMapper;
import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
import org.elasticsearch.index.mapper.internal.VersionFieldMapper;
import org.elasticsearch.script.CompiledScript;
import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHitField;
import org.elasticsearch.threadpool.ThreadPool;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import static java.util.Collections.emptyMap;
/**
* Abstract base for scrolling across a search and executing bulk indexes on all
* results.
*/
public abstract class AbstractAsyncBulkIndexByScrollAction<
Request extends AbstractBulkIndexByScrollRequest<Request>,
Response extends BulkIndexByScrollResponse>
extends AbstractAsyncBulkByScrollAction<Request, Response> {
private final ScriptService scriptService;
private final CompiledScript script;
public AbstractAsyncBulkIndexByScrollAction(BulkByScrollTask task, ESLogger logger, ScriptService scriptService,
Client client, ThreadPool threadPool, Request mainRequest, SearchRequest firstSearchRequest,
ActionListener<Response> listener) {
super(task, logger, client, threadPool, mainRequest, firstSearchRequest, listener);
this.scriptService = scriptService;
if (mainRequest.getScript() == null) {
script = null;
} else {
script = scriptService.compile(mainRequest.getScript(), ScriptContext.Standard.UPDATE, emptyMap());
}
}
/**
* Build the IndexRequest for a single search hit. This shouldn't handle
* metadata or the script. That will be handled by copyMetadata and
* applyScript functions that can be overridden.
*/
protected abstract IndexRequest buildIndexRequest(SearchHit doc);
@Override
protected BulkRequest buildBulk(Iterable<SearchHit> docs) {
BulkRequest bulkRequest = new BulkRequest();
ExecutableScript executableScript = null;
Map<String, Object> scriptCtx = null;
for (SearchHit doc : docs) {
IndexRequest index = buildIndexRequest(doc);
copyMetadata(index, doc);
if (script != null) {
if (executableScript == null) {
executableScript = scriptService.executable(script, mainRequest.getScript().getParams());
scriptCtx = new HashMap<>();
}
if (false == applyScript(index, doc, executableScript, scriptCtx)) {
continue;
}
}
bulkRequest.add(index);
}
return bulkRequest;
}
/**
* Copies the metadata from a hit to the index request.
*/
protected void copyMetadata(IndexRequest index, SearchHit doc) {
index.parent(fieldValue(doc, ParentFieldMapper.NAME));
copyRouting(index, doc);
// Comes back as a Long but needs to be a string
Long timestamp = fieldValue(doc, TimestampFieldMapper.NAME);
if (timestamp != null) {
index.timestamp(timestamp.toString());
}
Long ttl = fieldValue(doc, TTLFieldMapper.NAME);
if (ttl != null) {
index.ttl(ttl);
}
}
/**
* Part of copyMetadata but called out individually for easy overriding.
*/
protected void copyRouting(IndexRequest index, SearchHit doc) {
index.routing(fieldValue(doc, RoutingFieldMapper.NAME));
}
protected <T> T fieldValue(SearchHit doc, String fieldName) {
SearchHitField field = doc.field(fieldName);
return field == null ? null : field.value();
}
/**
* Apply a script to the request.
*
* @return is this request still ok to apply (true) or is it a noop (false)
*/
@SuppressWarnings("unchecked")
protected boolean applyScript(IndexRequest index, SearchHit doc, ExecutableScript script, final Map<String, Object> ctx) {
if (script == null) {
return true;
}
ctx.put(IndexFieldMapper.NAME, doc.index());
ctx.put(TypeFieldMapper.NAME, doc.type());
ctx.put(IdFieldMapper.NAME, doc.id());
Long oldVersion = doc.getVersion();
ctx.put(VersionFieldMapper.NAME, oldVersion);
String oldParent = fieldValue(doc, ParentFieldMapper.NAME);
ctx.put(ParentFieldMapper.NAME, oldParent);
String oldRouting = fieldValue(doc, RoutingFieldMapper.NAME);
ctx.put(RoutingFieldMapper.NAME, oldRouting);
Long oldTimestamp = fieldValue(doc, TimestampFieldMapper.NAME);
ctx.put(TimestampFieldMapper.NAME, oldTimestamp);
Long oldTTL = fieldValue(doc, TTLFieldMapper.NAME);
ctx.put(TTLFieldMapper.NAME, oldTTL);
ctx.put(SourceFieldMapper.NAME, index.sourceAsMap());
ctx.put("op", "update");
script.setNextVar("ctx", ctx);
script.run();
Map<String, Object> resultCtx = (Map<String, Object>) script.unwrap(ctx);
String newOp = (String) resultCtx.remove("op");
if (newOp == null) {
throw new IllegalArgumentException("Script cleared op!");
}
if ("noop".equals(newOp)) {
task.countNoop();
return false;
}
if (false == "update".equals(newOp)) {
throw new IllegalArgumentException("Invalid op [" + newOp + ']');
}
/*
* It'd be lovely to only set the source if we know it's been modified
* but it isn't worth keeping two copies of it around just to check!
*/
index.source((Map<String, Object>) resultCtx.remove(SourceFieldMapper.NAME));
Object newValue = ctx.remove(IndexFieldMapper.NAME);
if (false == doc.index().equals(newValue)) {
scriptChangedIndex(index, newValue);
}
newValue = ctx.remove(TypeFieldMapper.NAME);
if (false == doc.type().equals(newValue)) {
scriptChangedType(index, newValue);
}
newValue = ctx.remove(IdFieldMapper.NAME);
if (false == doc.id().equals(newValue)) {
scriptChangedId(index, newValue);
}
newValue = ctx.remove(VersionFieldMapper.NAME);
if (false == Objects.equals(oldVersion, newValue)) {
scriptChangedVersion(index, newValue);
}
newValue = ctx.remove(ParentFieldMapper.NAME);
if (false == Objects.equals(oldParent, newValue)) {
scriptChangedParent(index, newValue);
}
/*
* It's important that routing comes after parent in case you want to
* change them both.
*/
newValue = ctx.remove(RoutingFieldMapper.NAME);
if (false == Objects.equals(oldRouting, newValue)) {
scriptChangedRouting(index, newValue);
}
newValue = ctx.remove(TimestampFieldMapper.NAME);
if (false == Objects.equals(oldTimestamp, newValue)) {
scriptChangedTimestamp(index, newValue);
}
newValue = ctx.remove(TTLFieldMapper.NAME);
if (false == Objects.equals(oldTTL, newValue)) {
scriptChangedTTL(index, newValue);
}
if (false == ctx.isEmpty()) {
throw new IllegalArgumentException("Invalid fields added to ctx [" + String.join(",", ctx.keySet()) + ']');
}
return true;
}
protected abstract void scriptChangedIndex(IndexRequest index, Object to);
protected abstract void scriptChangedType(IndexRequest index, Object to);
protected abstract void scriptChangedId(IndexRequest index, Object to);
protected abstract void scriptChangedVersion(IndexRequest index, Object to);
protected abstract void scriptChangedRouting(IndexRequest index, Object to);
protected abstract void scriptChangedParent(IndexRequest index, Object to);
protected abstract void scriptChangedTimestamp(IndexRequest index, Object to);
protected abstract void scriptChangedTTL(IndexRequest index, Object to);
}
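A minimal sketch, not part of this change, of the ctx contract applyScript enforces above: scripts see the metadata fields plus ctx._source and ctx.op, may set ctx.op to "noop" to skip a document, and must not leave any other unknown key in ctx or the "Invalid fields added to ctx" check trips. The index name "migrations", the field "migrated" and the Groovy-style script body are illustrative assumptions; UpdateByQueryRequest is the concrete request type used elsewhere in this change.
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.index.reindex.UpdateByQueryRequest;
import org.elasticsearch.script.Script;
public class ScriptContractSketch {
/** Builds a request whose script noops documents that are already migrated. */
public static UpdateByQueryRequest migrateRequest() {
UpdateByQueryRequest request = new UpdateByQueryRequest(new SearchRequest("migrations"));
// ctx.op starts as "update"; switching it to "noop" counts the doc as skipped.
request.setScript(new Script(
"if (ctx._source.migrated == true) { ctx.op = \"noop\" } else { ctx._source.migrated = true }"));
return request;
}
}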

View File

@ -0,0 +1,83 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.TransportAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.search.aggregations.AggregatorParsers;
import org.elasticsearch.tasks.LoggingTaskListener;
import org.elasticsearch.tasks.Task;
import java.io.IOException;
public abstract class AbstractBaseReindexRestHandler<Request extends ActionRequest<Request>, Response extends BulkIndexByScrollResponse,
TA extends TransportAction<Request, Response>> extends BaseRestHandler {
protected final IndicesQueriesRegistry indicesQueriesRegistry;
protected final AggregatorParsers aggParsers;
private final ClusterService clusterService;
private final TA action;
protected AbstractBaseReindexRestHandler(Settings settings, Client client,
IndicesQueriesRegistry indicesQueriesRegistry, AggregatorParsers aggParsers, ClusterService clusterService, TA action) {
super(settings, client);
this.indicesQueriesRegistry = indicesQueriesRegistry;
this.aggParsers = aggParsers;
this.clusterService = clusterService;
this.action = action;
}
protected void execute(RestRequest request, Request internalRequest, RestChannel channel) throws IOException {
if (request.paramAsBoolean("wait_for_completion", true)) {
action.execute(internalRequest, new BulkIndexByScrollResponseContentListener<Response>(channel));
return;
}
/*
* Let's try to validate before forking so the user gets some error. The
* task can't totally validate until it starts but this is better than
* nothing.
*/
ActionRequestValidationException validationException = internalRequest.validate();
if (validationException != null) {
channel.sendResponse(new BytesRestResponse(channel, validationException));
return;
}
Task task = action.execute(internalRequest, LoggingTaskListener.instance());
sendTask(channel, task);
}
private void sendTask(RestChannel channel, Task task) throws IOException {
XContentBuilder builder = channel.newBuilder();
builder.startObject();
builder.field("task", clusterService.localNode().getId() + ":" + task.getId());
builder.endObject();
channel.sendResponse(new BytesRestResponse(RestStatus.OK, builder));
}
}
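When wait_for_completion is false the channel gets the task id straight back, in roughly the shape {"task":"<nodeId>:<taskId>"} (both values are placeholders here), while the work keeps running on the node under LoggingTaskListener.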

View File

@ -0,0 +1,301 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.WriteConsistencyLevel;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.tasks.Task;
import java.io.IOException;
import java.util.Arrays;
import static org.elasticsearch.action.ValidateActions.addValidationError;
import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes;
public abstract class AbstractBulkByScrollRequest<Self extends AbstractBulkByScrollRequest<Self>>
extends ActionRequest<Self> {
public static final int SIZE_ALL_MATCHES = -1;
private static final TimeValue DEFAULT_SCROLL_TIMEOUT = timeValueMinutes(5);
private static final int DEFAULT_SCROLL_SIZE = 100;
/**
* The search to be executed.
*/
private SearchRequest searchRequest;
/**
* Maximum number of processed documents. Defaults to -1 meaning process all
* documents.
*/
private int size = SIZE_ALL_MATCHES;
/**
* Should version conflicts cause aborts? Defaults to true.
*/
private boolean abortOnVersionConflict = true;
/**
* Call refresh on the indexes we've written to after the request ends?
*/
private boolean refresh = false;
/**
* Timeout to wait for the shards to be available for each bulk request.
*/
private TimeValue timeout = ReplicationRequest.DEFAULT_TIMEOUT;
/**
* Consistency level for write requests.
*/
private WriteConsistencyLevel consistency = WriteConsistencyLevel.DEFAULT;
/**
* Initial delay after a rejection before retrying a bulk request. With the default maxRetries the total backoff for retrying rejections
* is about one minute per bulk request. Once the entire bulk request is successful the retry counter resets.
*/
private TimeValue retryBackoffInitialTime = timeValueMillis(500);
/**
* Total number of retries attempted for rejections. There is no way to ask for unlimited retries.
*/
private int maxRetries = 11;
public AbstractBulkByScrollRequest() {
}
public AbstractBulkByScrollRequest(SearchRequest source) {
this.searchRequest = source;
// Set the defaults which differ from SearchRequest's defaults.
source.scroll(DEFAULT_SCROLL_TIMEOUT);
source.source(new SearchSourceBuilder());
source.source().version(true);
source.source().size(DEFAULT_SCROLL_SIZE);
}
/**
* `this` cast to Self. Used for building fluent methods without cast
* warnings.
*/
protected abstract Self self();
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException e = searchRequest.validate();
if (searchRequest.source().from() != -1) {
e = addValidationError("from is not supported in this context", e);
}
if (maxRetries < 0) {
e = addValidationError("retries cannnot be negative", e);
}
if (false == (size == -1 || size > 0)) {
e = addValidationError(
"size should be greater than 0 if the request is limited to some number of documents or -1 if it isn't but it was ["
+ size + "]",
e);
}
return e;
}
/**
* Maximum number of processed documents. Defaults to -1 meaning process all
* documents.
*/
public int getSize() {
return size;
}
/**
* Maximum number of processed documents. Defaults to -1 meaning process all
* documents.
*/
public Self setSize(int size) {
this.size = size;
return self();
}
/**
* Should version conflicts cause aborts? Defaults to true.
*/
public boolean isAbortOnVersionConflict() {
return abortOnVersionConflict;
}
/**
* Should version conflicts cause aborts? Defaults to true.
*/
public Self setAbortOnVersionConflict(boolean abortOnVersionConflict) {
this.abortOnVersionConflict = abortOnVersionConflict;
return self();
}
/**
* Sets abortOnVersionConflict based on REST-friendly names.
*/
public void setConflicts(String conflicts) {
switch (conflicts) {
case "proceed":
setAbortOnVersionConflict(false);
return;
case "abort":
setAbortOnVersionConflict(true);
return;
default:
throw new IllegalArgumentException("conflicts may only be \"proceed\" or \"abort\" but was [" + conflicts + "]");
}
}
/**
* The search request that matches the documents to process.
*/
public SearchRequest getSearchRequest() {
return searchRequest;
}
/**
* Call refresh on the indexes we've written to after the request ends?
*/
public boolean isRefresh() {
return refresh;
}
/**
* Call refresh on the indexes we've written to after the request ends?
*/
public Self setRefresh(boolean refresh) {
this.refresh = refresh;
return self();
}
/**
* Timeout to wait for the shards to be available for each bulk request.
*/
public TimeValue getTimeout() {
return timeout;
}
/**
* Timeout to wait for the shards to be available for each bulk request.
*/
public Self setTimeout(TimeValue timeout) {
this.timeout = timeout;
return self();
}
/**
* Consistency level for write requests.
*/
public WriteConsistencyLevel getConsistency() {
return consistency;
}
/**
* Consistency level for write requests.
*/
public Self setConsistency(WriteConsistencyLevel consistency) {
this.consistency = consistency;
return self();
}
/**
* Initial delay after a rejection before retrying request.
*/
public TimeValue getRetryBackoffInitialTime() {
return retryBackoffInitialTime;
}
/**
* Set the initial delay after a rejection before retrying request.
*/
public Self setRetryBackoffInitialTime(TimeValue retryBackoffInitialTime) {
this.retryBackoffInitialTime = retryBackoffInitialTime;
return self();
}
/**
* Total number of retries attempted for rejections.
*/
public int getMaxRetries() {
return maxRetries;
}
/**
* Set the total number of retries attempted for rejections. There is no way to ask for unlimited retries.
*/
public Self setMaxRetries(int maxRetries) {
this.maxRetries = maxRetries;
return self();
}
@Override
public Task createTask(long id, String type, String action) {
return new BulkByScrollTask(id, type, action, getDescription());
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
searchRequest = new SearchRequest();
searchRequest.readFrom(in);
abortOnVersionConflict = in.readBoolean();
size = in.readVInt();
refresh = in.readBoolean();
timeout = TimeValue.readTimeValue(in);
consistency = WriteConsistencyLevel.fromId(in.readByte());
retryBackoffInitialTime = TimeValue.readTimeValue(in);
maxRetries = in.readVInt();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
searchRequest.writeTo(out);
out.writeBoolean(abortOnVersionConflict);
out.writeVInt(size);
out.writeBoolean(refresh);
timeout.writeTo(out);
out.writeByte(consistency.id());
retryBackoffInitialTime.writeTo(out);
out.writeVInt(maxRetries);
}
/**
* Append a short description of the search request to a StringBuilder. Used
* to make toString.
*/
protected void searchToString(StringBuilder b) {
if (searchRequest.indices() != null && searchRequest.indices().length != 0) {
b.append(Arrays.toString(searchRequest.indices()));
} else {
b.append("[all indices]");
}
if (searchRequest.types() != null && searchRequest.types().length != 0) {
b.append(Arrays.toString(searchRequest.types()));
}
}
}
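A small sketch of the request-level knobs defined above, using the concrete ReindexRequest that appears later in this change; the index names "src" and "dest" are assumptions. setConflicts maps the REST-friendly names onto setAbortOnVersionConflict, and validate() accepts a size of -1 (all matches) or any positive number.
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.index.reindex.ReindexRequest;
public class RequestKnobsSketch {
public static ReindexRequest limitedReindex() {
ReindexRequest request = new ReindexRequest(new SearchRequest("src"), new IndexRequest("dest"));
request.setSize(1000);           // stop after processing 1000 documents
request.setConflicts("proceed"); // same as setAbortOnVersionConflict(false)
assert request.validate() == null; // -1 or positive sizes pass validation
return request;
}
}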

View File

@ -0,0 +1,109 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.WriteConsistencyLevel;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.query.QueryBuilder;
public abstract class AbstractBulkByScrollRequestBuilder<
Request extends AbstractBulkByScrollRequest<Request>,
Response extends ActionResponse,
Self extends AbstractBulkByScrollRequestBuilder<Request, Response, Self>>
extends ActionRequestBuilder<Request, Response, Self> {
private final SearchRequestBuilder source;
protected AbstractBulkByScrollRequestBuilder(ElasticsearchClient client,
Action<Request, Response, Self> action, SearchRequestBuilder source, Request request) {
super(client, action, request);
this.source = source;
}
protected abstract Self self();
/**
* The search used to find documents to process.
*/
public SearchRequestBuilder source() {
return source;
}
/**
* Set the source indices.
*/
public Self source(String... indices) {
source.setIndices(indices);
return self();
}
/**
* Set the query that will filter the source. Just a convenience method for
* easy chaining.
*/
public Self filter(QueryBuilder<?> filter) {
source.setQuery(filter);
return self();
}
/**
* The maximum number of documents to attempt.
*/
public Self size(int size) {
request.setSize(size);
return self();
}
/**
* Should version conflicts cause the action to abort?
*/
public Self abortOnVersionConflict(boolean abortOnVersionConflict) {
request.setAbortOnVersionConflict(abortOnVersionConflict);
return self();
}
/**
* Call refresh on the indexes we've written to after the request ends?
*/
public Self refresh(boolean refresh) {
request.setRefresh(refresh);
return self();
}
/**
* Timeout to wait for the shards to be available for each bulk request.
*/
public Self timeout(TimeValue timeout) {
request.setTimeout(timeout);
return self();
}
/**
* Consistency level for write requests.
*/
public Self consistency(WriteConsistencyLevel consistency) {
request.setConsistency(consistency);
return self();
}
}

View File

@ -0,0 +1,80 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.script.Script;
import java.io.IOException;
public abstract class AbstractBulkIndexByScrollRequest<Self extends AbstractBulkIndexByScrollRequest<Self>>
extends AbstractBulkByScrollRequest<Self> {
/**
* Script to modify the documents before they are processed.
*/
private Script script;
public AbstractBulkIndexByScrollRequest() {
}
public AbstractBulkIndexByScrollRequest(SearchRequest source) {
super(source);
}
/**
* Script to modify the documents before they are processed.
*/
public Script getScript() {
return script;
}
/**
* Script to modify the documents before they are processed.
*/
public Self setScript(@Nullable Script script) {
this.script = script;
return self();
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
if (in.readBoolean()) {
script = Script.readScript(in);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeOptionalStreamable(script);
}
@Override
protected void searchToString(StringBuilder b) {
super.searchToString(b);
if (script != null) {
b.append(" updated with [").append(script).append(']');
}
}
}

View File

@ -0,0 +1,46 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.script.Script;
public abstract class AbstractBulkIndexByScrollRequestBuilder<
Request extends AbstractBulkIndexByScrollRequest<Request>,
Response extends ActionResponse,
Self extends AbstractBulkIndexByScrollRequestBuilder<Request, Response, Self>>
extends AbstractBulkByScrollRequestBuilder<Request, Response, Self> {
protected AbstractBulkIndexByScrollRequestBuilder(ElasticsearchClient client,
Action<Request, Response, Self> action, SearchRequestBuilder search, Request request) {
super(client, action, search, request);
}
/**
* Script to modify the documents before they are processed.
*/
public Self script(Script script) {
request.setScript(script);
return self();
}
}

View File

@ -0,0 +1,290 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.tasks.CancellableTask;
import org.elasticsearch.tasks.Task;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
/**
* Task storing information about a currently running BulkByScroll request.
*/
public class BulkByScrollTask extends CancellableTask {
/**
* The total number of documents this request will process. 0 means we don't yet know or, possibly, there are actually 0 documents
* to process. It's ok that these have the same meaning because any request with 0 actual documents should be quite short lived.
*/
private final AtomicLong total = new AtomicLong(0);
private final AtomicLong updated = new AtomicLong(0);
private final AtomicLong created = new AtomicLong(0);
private final AtomicLong deleted = new AtomicLong(0);
private final AtomicLong noops = new AtomicLong(0);
private final AtomicInteger batch = new AtomicInteger(0);
private final AtomicLong versionConflicts = new AtomicLong(0);
private final AtomicLong retries = new AtomicLong(0);
public BulkByScrollTask(long id, String type, String action, String description) {
super(id, type, action, description);
}
@Override
public Status getStatus() {
return new Status(total.get(), updated.get(), created.get(), deleted.get(), batch.get(), versionConflicts.get(), noops.get(),
retries.get(), getReasonCancelled());
}
/**
* Total number of successfully processed documents.
*/
public long getSuccessfullyProcessed() {
return updated.get() + created.get() + deleted.get();
}
public static class Status implements Task.Status {
public static final Status PROTOTYPE = new Status(0, 0, 0, 0, 0, 0, 0, 0, null);
private final long total;
private final long updated;
private final long created;
private final long deleted;
private final int batches;
private final long versionConflicts;
private final long noops;
private final long retries;
private final String reasonCancelled;
public Status(long total, long updated, long created, long deleted, int batches, long versionConflicts, long noops, long retries,
@Nullable String reasonCancelled) {
this.total = checkPositive(total, "total");
this.updated = checkPositive(updated, "updated");
this.created = checkPositive(created, "created");
this.deleted = checkPositive(deleted, "deleted");
this.batches = checkPositive(batches, "batches");
this.versionConflicts = checkPositive(versionConflicts, "versionConflicts");
this.noops = checkPositive(noops, "noops");
this.retries = checkPositive(retries, "retries");
this.reasonCancelled = reasonCancelled;
}
public Status(StreamInput in) throws IOException {
total = in.readVLong();
updated = in.readVLong();
created = in.readVLong();
deleted = in.readVLong();
batches = in.readVInt();
versionConflicts = in.readVLong();
noops = in.readVLong();
retries = in.readVLong();
reasonCancelled = in.readOptionalString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVLong(total);
out.writeVLong(updated);
out.writeVLong(created);
out.writeVLong(deleted);
out.writeVInt(batches);
out.writeVLong(versionConflicts);
out.writeVLong(noops);
out.writeVLong(retries);
out.writeOptionalString(reasonCancelled);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
innerXContent(builder, params, true, true);
return builder.endObject();
}
public XContentBuilder innerXContent(XContentBuilder builder, Params params, boolean includeCreated, boolean includeDeleted)
throws IOException {
builder.field("total", total);
builder.field("updated", updated);
if (includeCreated) {
builder.field("created", created);
}
if (includeDeleted) {
builder.field("deleted", deleted);
}
builder.field("batches", batches);
builder.field("version_conflicts", versionConflicts);
builder.field("noops", noops);
builder.field("retries", retries);
if (reasonCancelled != null) {
builder.field("canceled", reasonCancelled);
}
return builder;
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("BulkIndexByScrollResponse[");
innerToString(builder, true, true);
return builder.append(']').toString();
}
public void innerToString(StringBuilder builder, boolean includeCreated, boolean includeDeleted) {
builder.append("updated=").append(updated);
if (includeCreated) {
builder.append(",created=").append(created);
}
if (includeDeleted) {
builder.append(",deleted=").append(deleted);
}
builder.append(",batches=").append(batches);
builder.append(",versionConflicts=").append(versionConflicts);
builder.append(",noops=").append(noops);
builder.append(",retries=").append(retries);
if (reasonCancelled != null) {
builder.append(",canceled=").append(reasonCancelled);
}
}
@Override
public String getWriteableName() {
return "bulk-by-scroll";
}
@Override
public Status readFrom(StreamInput in) throws IOException {
return new Status(in);
}
/**
* The total number of documents this request will process. 0 means we don't yet know or, possibly, there are actually 0 documents
* to process. It's ok that these have the same meaning because any request with 0 actual documents should be quite short lived.
*/
public long getTotal() {
return total;
}
/**
* Count of documents updated.
*/
public long getUpdated() {
return updated;
}
/**
* Count of documents created.
*/
public long getCreated() {
return created;
}
/**
* Count of successful delete operations.
*/
public long getDeleted() {
return deleted;
}
/**
* Number of scroll responses this request has processed.
*/
public int getBatches() {
return batches;
}
/**
* Number of version conflicts this request has hit.
*/
public long getVersionConflicts() {
return versionConflicts;
}
/**
* Number of noops (skipped bulk items) as part of this request.
*/
public long getNoops() {
return noops;
}
/**
* Number of retries that had to be attempted due to rejected executions.
*/
public long getRetries() {
return retries;
}
/**
* The reason that the request was canceled or null if it hasn't been.
*/
public String getReasonCancelled() {
return reasonCancelled;
}
private int checkPositive(int value, String name) {
if (value < 0) {
throw new IllegalArgumentException(name + " must not be negative but was [" + value + "]");
}
return value;
}
private long checkPositive(long value, String name) {
if (value < 0) {
throw new IllegalArgumentException(name + " must not be negative but was [" + value + "]");
}
return value;
}
}
void setTotal(long totalHits) {
total.set(totalHits);
}
void countBatch() {
batch.incrementAndGet();
}
void countNoop() {
noops.incrementAndGet();
}
void countCreated() {
created.incrementAndGet();
}
void countUpdated() {
updated.incrementAndGet();
}
void countDeleted() {
deleted.incrementAndGet();
}
void countVersionConflict() {
versionConflicts.incrementAndGet();
}
void countRetry() {
retries.incrementAndGet();
}
}
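For illustration only, a Status built with made-up numbers and the toString form it renders; the constructor order is total, updated, created, deleted, batches, versionConflicts, noops, retries, reasonCancelled, and total is tracked but not printed.
import org.elasticsearch.index.reindex.BulkByScrollTask;
public class StatusRenderSketch {
public static void main(String[] args) {
BulkByScrollTask.Status status =
new BulkByScrollTask.Status(1000, 600, 300, 100, 10, 4, 2, 1, null);
// Prints: BulkIndexByScrollResponse[updated=600,created=300,deleted=100,batches=10,versionConflicts=4,noops=2,retries=1]
System.out.println(status);
}
}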

View File

@ -0,0 +1,169 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import static java.lang.Math.min;
import static java.util.Collections.unmodifiableList;
import static java.util.Objects.requireNonNull;
import static org.elasticsearch.action.search.ShardSearchFailure.readShardSearchFailure;
/**
* Response used for actions that index many documents using a scroll request.
*/
public class BulkIndexByScrollResponse extends ActionResponse implements ToXContent {
private TimeValue took;
private BulkByScrollTask.Status status;
private List<Failure> indexingFailures;
private List<ShardSearchFailure> searchFailures;
public BulkIndexByScrollResponse() {
}
public BulkIndexByScrollResponse(TimeValue took, BulkByScrollTask.Status status, List<Failure> indexingFailures,
List<ShardSearchFailure> searchFailures) {
this.took = took;
this.status = requireNonNull(status, "Null status not supported");
this.indexingFailures = indexingFailures;
this.searchFailures = searchFailures;
}
public TimeValue getTook() {
return took;
}
protected BulkByScrollTask.Status getStatus() {
return status;
}
public long getUpdated() {
return status.getUpdated();
}
public int getBatches() {
return status.getBatches();
}
public long getVersionConflicts() {
return status.getVersionConflicts();
}
public long getNoops() {
return status.getNoops();
}
/**
* The reason that the request was canceled or null if it hasn't been.
*/
public String getReasonCancelled() {
return status.getReasonCancelled();
}
/**
* All of the indexing failures. Version conflicts are only included if the request sets abortOnVersionConflict to true (the
* default).
*/
public List<Failure> getIndexingFailures() {
return indexingFailures;
}
/**
* All search failures.
*/
public List<ShardSearchFailure> getSearchFailures() {
return searchFailures;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
took.writeTo(out);
status.writeTo(out);
out.writeVInt(indexingFailures.size());
for (Failure failure: indexingFailures) {
failure.writeTo(out);
}
out.writeVInt(searchFailures.size());
for (ShardSearchFailure failure: searchFailures) {
failure.writeTo(out);
}
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
took = TimeValue.readTimeValue(in);
status = new BulkByScrollTask.Status(in);
int indexingFailuresCount = in.readVInt();
List<Failure> indexingFailures = new ArrayList<>(indexingFailuresCount);
for (int i = 0; i < indexingFailuresCount; i++) {
indexingFailures.add(Failure.PROTOTYPE.readFrom(in));
}
this.indexingFailures = unmodifiableList(indexingFailures);
int searchFailuresCount = in.readVInt();
List<ShardSearchFailure> searchFailures = new ArrayList<>(searchFailuresCount);
for (int i = 0; i < searchFailuresCount; i++) {
searchFailures.add(readShardSearchFailure(in));
}
this.searchFailures = unmodifiableList(searchFailures);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field("took", took.millis());
status.innerXContent(builder, params, false, false);
builder.startArray("failures");
for (Failure failure: indexingFailures) {
builder.startObject();
failure.toXContent(builder, params);
builder.endObject();
}
for (ShardSearchFailure failure: searchFailures) {
builder.startObject();
failure.toXContent(builder, params);
builder.endObject();
}
builder.endArray();
return builder;
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("BulkIndexByScrollResponse[");
builder.append("took=").append(took).append(',');
status.innerToString(builder, false, false);
builder.append(",indexing_failures=").append(getIndexingFailures().subList(0, min(3, getIndexingFailures().size())));
builder.append(",search_failures=").append(getSearchFailures().subList(0, min(3, getSearchFailures().size())));
return builder.append(']').toString();
}
}
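A hedged sketch of consuming this response asynchronously; client is assumed to be any available ElasticsearchClient and request an already-built UpdateByQueryRequest (UpdateByQueryAction's response type is BulkIndexByScrollResponse, per the transport action registered by the plugin below).
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.index.reindex.BulkIndexByScrollResponse;
import org.elasticsearch.index.reindex.UpdateByQueryAction;
import org.elasticsearch.index.reindex.UpdateByQueryRequest;
public class ResponseHandlingSketch {
public static void run(ElasticsearchClient client, UpdateByQueryRequest request) {
client.execute(UpdateByQueryAction.INSTANCE, request, new ActionListener<BulkIndexByScrollResponse>() {
@Override
public void onResponse(BulkIndexByScrollResponse response) {
// Version conflicts only show up as failures when abortOnVersionConflict is set.
System.out.println("updated " + response.getUpdated() + " docs with "
+ response.getVersionConflicts() + " version conflicts");
}
@Override
public void onFailure(Throwable e) {
e.printStackTrace();
}
});
}
}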

View File

@ -0,0 +1,46 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.rest.action.support.RestToXContentListener;
/**
* Just like RestToXContentListener but returns a status higher than 200 if
* there are any failures.
*/
public class BulkIndexByScrollResponseContentListener<R extends BulkIndexByScrollResponse> extends RestToXContentListener<R> {
public BulkIndexByScrollResponseContentListener(RestChannel channel) {
super(channel);
}
@Override
protected RestStatus getStatus(R response) {
RestStatus status = RestStatus.OK;
for (Failure failure : response.getIndexingFailures()) {
if (failure.getStatus().getStatus() > status.getStatus()) {
status = failure.getStatus();
}
}
return status;
}
}

View File

@ -0,0 +1,42 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.action.Action;
import org.elasticsearch.client.ElasticsearchClient;
public class ReindexAction extends Action<ReindexRequest, ReindexResponse, ReindexRequestBuilder> {
public static final ReindexAction INSTANCE = new ReindexAction();
public static final String NAME = "indices:data/write/reindex";
private ReindexAction() {
super(NAME);
}
@Override
public ReindexRequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new ReindexRequestBuilder(client, this);
}
@Override
public ReindexResponse newResponse() {
return new ReindexResponse();
}
}

View File

@ -0,0 +1,48 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.action.ActionModule;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.plugins.Plugin;
public class ReindexPlugin extends Plugin {
public static final String NAME = "reindex";
@Override
public String name() {
return NAME;
}
@Override
public String description() {
return "The Reindex module adds APIs to reindex from one index to another or update documents in place.";
}
public void onModule(ActionModule actionModule) {
actionModule.registerAction(ReindexAction.INSTANCE, TransportReindexAction.class);
actionModule.registerAction(UpdateByQueryAction.INSTANCE, TransportUpdateByQueryAction.class);
}
public void onModule(NetworkModule restModule) {
restModule.registerRestHandler(RestReindexAction.class);
restModule.registerRestHandler(RestUpdateByQueryAction.class);
}
}

View File

@ -0,0 +1,126 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lucene.uid.Versions;
import java.io.IOException;
import static org.elasticsearch.action.ValidateActions.addValidationError;
import static org.elasticsearch.index.VersionType.INTERNAL;
public class ReindexRequest extends AbstractBulkIndexByScrollRequest<ReindexRequest> {
/**
* Prototype for index requests.
*/
private IndexRequest destination;
public ReindexRequest() {
}
public ReindexRequest(SearchRequest search, IndexRequest destination) {
super(search);
this.destination = destination;
}
@Override
protected ReindexRequest self() {
return this;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException e = super.validate();
if (getSearchRequest().indices() == null || getSearchRequest().indices().length == 0) {
e = addValidationError("use _all if you really want to copy from all existing indexes", e);
}
/*
* Note that we don't call index's validator - it won't work because
* we'll be filling in portions of it as we receive the docs. But we can
* validate some things so we do that below.
*/
if (destination.index() == null) {
e = addValidationError("index must be specified", e);
return e;
}
if (false == routingIsValid()) {
e = addValidationError("routing must be unset, [keep], [discard] or [=<some new value>]", e);
}
if (destination.versionType() == INTERNAL) {
if (destination.version() != Versions.MATCH_ANY && destination.version() != Versions.MATCH_DELETED) {
e = addValidationError("unsupported version for internal versioning [" + destination.version() + ']', e);
}
}
if (destination.ttl() != null) {
e = addValidationError("setting ttl on destination isn't supported. use scripts instead.", e);
}
if (destination.timestamp() != null) {
e = addValidationError("setting timestamp on destination isn't supported. use scripts instead.", e);
}
return e;
}
private boolean routingIsValid() {
if (destination.routing() == null || destination.routing().startsWith("=")) {
return true;
}
switch (destination.routing()) {
case "keep":
case "discard":
return true;
default:
return false;
}
}
public IndexRequest getDestination() {
return destination;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
destination = new IndexRequest();
destination.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
destination.writeTo(out);
}
@Override
public String toString() {
StringBuilder b = new StringBuilder();
b.append("reindex from ");
searchToString(b);
b.append(" to [").append(destination.index()).append(']');
if (destination.type() != null) {
b.append('[').append(destination.type()).append(']');
}
return b.toString();
}
}
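A short sketch of the routing contract validate() enforces above; the index names are assumptions. "keep", "discard" and "=<literal>" pass, anything else produces the validation error.
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.index.reindex.ReindexRequest;
public class RoutingValidationSketch {
public static void main(String[] args) {
ReindexRequest request = new ReindexRequest(new SearchRequest("src"), new IndexRequest("dest"));
request.getDestination().routing("=fixed"); // copy every doc with the literal routing "fixed"
assert request.validate() == null;
request.getDestination().routing("bogus");
assert request.validate() != null; // "routing must be unset, [keep], [discard] or [=<some new value>]"
}
}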

View File

@ -0,0 +1,70 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.index.IndexAction;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchAction;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
public class ReindexRequestBuilder extends
AbstractBulkIndexByScrollRequestBuilder<ReindexRequest, ReindexResponse, ReindexRequestBuilder> {
private final IndexRequestBuilder destination;
public ReindexRequestBuilder(ElasticsearchClient client,
Action<ReindexRequest, ReindexResponse, ReindexRequestBuilder> action) {
this(client, action, new SearchRequestBuilder(client, SearchAction.INSTANCE),
new IndexRequestBuilder(client, IndexAction.INSTANCE));
}
private ReindexRequestBuilder(ElasticsearchClient client,
Action<ReindexRequest, ReindexResponse, ReindexRequestBuilder> action,
SearchRequestBuilder search, IndexRequestBuilder destination) {
super(client, action, search, new ReindexRequest(search.request(), destination.request()));
this.destination = destination;
}
@Override
protected ReindexRequestBuilder self() {
return this;
}
public IndexRequestBuilder destination() {
return destination;
}
/**
* Set the destination index.
*/
public ReindexRequestBuilder destination(String index) {
destination.setIndex(index);
return this;
}
/**
* Set the destination index and type.
*/
public ReindexRequestBuilder destination(String index, String type) {
destination.setIndex(index).setType(type);
return this;
}
}
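The fluent path through the builder above, as a sketch; client is assumed to be available and the indices, type and query are examples only.
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.reindex.ReindexAction;
import org.elasticsearch.index.reindex.ReindexRequestBuilder;
import org.elasticsearch.index.reindex.ReindexResponse;
public class ReindexBuilderSketch {
public static ReindexResponse copyActiveUsers(ElasticsearchClient client) {
return new ReindexRequestBuilder(client, ReindexAction.INSTANCE)
.source("src")                                   // read from "src"
.destination("dest", "user")                     // write type "user" into "dest"
.filter(QueryBuilders.termQuery("active", true)) // only matching docs
.size(500)                                       // cap at 500 documents
.refresh(true)                                   // refresh "dest" when done
.get();                                          // block for the response
}
}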

View File

@ -0,0 +1,73 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.reindex.BulkByScrollTask.Status;
import java.io.IOException;
import java.util.List;
/**
* Response for the ReindexAction.
*/
public class ReindexResponse extends BulkIndexByScrollResponse {
public ReindexResponse() {
}
public ReindexResponse(TimeValue took, Status status, List<Failure> indexingFailures, List<ShardSearchFailure> searchFailures) {
super(took, status, indexingFailures, searchFailures);
}
public long getCreated() {
return getStatus().getCreated();
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field("took", getTook());
getStatus().innerXContent(builder, params, true, false);
builder.startArray("failures");
for (Failure failure: getIndexingFailures()) {
builder.startObject();
failure.toXContent(builder, params);
builder.endObject();
}
for (ShardSearchFailure failure: getSearchFailures()) {
builder.startObject();
failure.toXContent(builder, params);
builder.endObject();
}
builder.endArray();
return builder;
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("ReindexResponse[");
builder.append("took=").append(getTook()).append(',');
getStatus().innerToString(builder, true, false);
return builder.append(']').toString();
}
}

View File

@ -0,0 +1,178 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.action.WriteConsistencyLevel;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.aggregations.AggregatorParsers;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import static org.elasticsearch.common.unit.TimeValue.parseTimeValue;
import static org.elasticsearch.rest.RestRequest.Method.POST;
import static org.elasticsearch.rest.RestStatus.BAD_REQUEST;
/**
* Expose reindex over rest.
*/
public class RestReindexAction extends AbstractBaseReindexRestHandler<ReindexRequest, ReindexResponse, TransportReindexAction> {
private static final ObjectParser<ReindexRequest, ReindexParseContext> PARSER = new ObjectParser<>("reindex");
static {
ObjectParser.Parser<SearchRequest, ReindexParseContext> sourceParser = (parser, search, context) -> {
/*
* Extract the parameters that we need from the parser. We could do
* away with this hack when search source has an ObjectParser.
*/
Map<String, Object> source = parser.map();
String[] indices = extractStringArray(source, "index");
if (indices != null) {
search.indices(indices);
}
String[] types = extractStringArray(source, "type");
if (types != null) {
search.types(types);
}
XContentBuilder builder = XContentFactory.contentBuilder(parser.contentType());
builder.map(source);
parser = parser.contentType().xContent().createParser(builder.bytes());
context.queryParseContext.reset(parser);
search.source().parseXContent(parser, context.queryParseContext, context.aggParsers);
};
ObjectParser<IndexRequest, Void> destParser = new ObjectParser<>("dest");
destParser.declareString(IndexRequest::index, new ParseField("index"));
destParser.declareString(IndexRequest::type, new ParseField("type"));
destParser.declareString(IndexRequest::routing, new ParseField("routing"));
destParser.declareString(IndexRequest::opType, new ParseField("opType"));
destParser.declareString((s, i) -> s.versionType(VersionType.fromString(i)), new ParseField("versionType"));
// These exist just so the user can get a nice validation error:
destParser.declareString(IndexRequest::timestamp, new ParseField("timestamp"));
destParser.declareString((i, ttl) -> i.ttl(parseTimeValue(ttl, TimeValue.timeValueMillis(-1), "ttl").millis()),
new ParseField("ttl"));
PARSER.declareField((p, v, c) -> sourceParser.parse(p, v.getSearchRequest(), c), new ParseField("source"), ValueType.OBJECT);
PARSER.declareField((p, v, c) -> destParser.parse(p, v.getDestination(), null), new ParseField("dest"), ValueType.OBJECT);
PARSER.declareInt(ReindexRequest::setSize, new ParseField("size"));
PARSER.declareField((p, v, c) -> v.setScript(Script.parse(p, c.queryParseContext.parseFieldMatcher())), new ParseField("script"),
ValueType.OBJECT);
PARSER.declareString(ReindexRequest::setConflicts, new ParseField("conflicts"));
}
@Inject
public RestReindexAction(Settings settings, RestController controller, Client client,
IndicesQueriesRegistry indicesQueriesRegistry, AggregatorParsers aggParsers, ClusterService clusterService,
TransportReindexAction action) {
super(settings, client, indicesQueriesRegistry, aggParsers, clusterService, action);
controller.registerHandler(POST, "/_reindex", this);
}
@Override
public void handleRequest(RestRequest request, RestChannel channel, Client client) throws IOException {
if (false == request.hasContent()) {
badRequest(channel, "body required");
return;
}
ReindexRequest internalRequest = new ReindexRequest(new SearchRequest(), new IndexRequest());
try (XContentParser xcontent = XContentFactory.xContent(request.content()).createParser(request.content())) {
PARSER.parse(xcontent, internalRequest, new ReindexParseContext(new QueryParseContext(indicesQueriesRegistry), aggParsers));
} catch (ParsingException e) {
logger.warn("Bad request", e);
badRequest(channel, e.getDetailedMessage());
return;
}
parseCommon(internalRequest, request);
execute(request, internalRequest, channel);
}
private void badRequest(RestChannel channel, String message) {
try {
XContentBuilder builder = channel.newErrorBuilder();
channel.sendResponse(new BytesRestResponse(BAD_REQUEST, builder.startObject().field("error", message).endObject()));
} catch (IOException e) {
logger.warn("Failed to send response", e);
}
}
public static void parseCommon(AbstractBulkByScrollRequest<?> internalRequest, RestRequest request) {
internalRequest.setRefresh(request.paramAsBoolean("refresh", internalRequest.isRefresh()));
internalRequest.setTimeout(request.paramAsTime("timeout", internalRequest.getTimeout()));
String consistency = request.param("consistency");
if (consistency != null) {
internalRequest.setConsistency(WriteConsistencyLevel.fromString(consistency));
}
}
/**
* Yank a string array from a map. Emulates XContent's permissive String to
* String array conversions.
*/
private static String[] extractStringArray(Map<String, Object> source, String name) {
Object value = source.remove(name);
if (value == null) {
return null;
}
if (value instanceof List) {
@SuppressWarnings("unchecked")
List<String> list = (List<String>) value;
return list.toArray(new String[list.size()]);
} else if (value instanceof String) {
return new String[] {(String) value};
} else {
throw new IllegalArgumentException("Expected [" + name + "] to be a list of a string but was [" + value + ']');
}
}
private class ReindexParseContext {
private final QueryParseContext queryParseContext;
private final AggregatorParsers aggParsers;
public ReindexParseContext(QueryParseContext queryParseContext, AggregatorParsers aggParsers) {
this.queryParseContext = queryParseContext;
this.aggParsers = aggParsers;
}
}
}
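For reference, a sketch of a body the PARSER above would accept, built with XContentBuilder so the example stays in Java; the index names and values are placeholders.
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
public class ReindexBodySketch {
public static XContentBuilder body() throws Exception {
// Equivalent JSON: {"source":{"index":"src"},"dest":{"index":"dest"},"size":1000,"conflicts":"proceed"}
return XContentFactory.jsonBuilder().startObject()
.startObject("source").field("index", "src").endObject()
.startObject("dest").field("index", "dest").endObject()
.field("size", 1000)
.field("conflicts", "proceed")
.endObject();
}
}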

View File

@ -0,0 +1,112 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.search.RestSearchAction;
import org.elasticsearch.rest.action.support.RestActions;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.aggregations.AggregatorParsers;
import java.util.Map;
import static org.elasticsearch.index.reindex.AbstractBulkByScrollRequest.SIZE_ALL_MATCHES;
import static org.elasticsearch.index.reindex.RestReindexAction.parseCommon;
import static org.elasticsearch.rest.RestRequest.Method.POST;
public class RestUpdateByQueryAction extends
AbstractBaseReindexRestHandler<UpdateByQueryRequest, BulkIndexByScrollResponse, TransportUpdateByQueryAction> {
@Inject
public RestUpdateByQueryAction(Settings settings, RestController controller, Client client,
IndicesQueriesRegistry indicesQueriesRegistry, AggregatorParsers aggParsers, ClusterService clusterService,
TransportUpdateByQueryAction action) {
super(settings, client, indicesQueriesRegistry, aggParsers, clusterService, action);
controller.registerHandler(POST, "/{index}/_update_by_query", this);
controller.registerHandler(POST, "/{index}/{type}/_update_by_query", this);
}
@Override
protected void handleRequest(RestRequest request, RestChannel channel, Client client) throws Exception {
/*
* Passing the search request through UpdateByQueryRequest first allows
* it to set its own defaults which differ from SearchRequest's
* defaults. Then the parse can override them.
*/
UpdateByQueryRequest internalRequest = new UpdateByQueryRequest(new SearchRequest());
int scrollSize = internalRequest.getSearchRequest().source().size();
internalRequest.getSearchRequest().source().size(SIZE_ALL_MATCHES);
/*
* We can't send parseSearchRequest REST content that it doesn't support,
* so we have to strip out everything that is valid here but unknown to it
* before handing over the body. This is a temporary hack and should get
* better once SearchRequest has full ObjectParser support; then we can
* simply delegate.
*/
BytesReference bodyContent = null;
if (RestActions.hasBodyContent(request)) {
bodyContent = RestActions.getRestContent(request);
Tuple<XContentType, Map<String, Object>> body = XContentHelper.convertToMap(bodyContent, false);
boolean modified = false;
String conflicts = (String) body.v2().remove("conflicts");
if (conflicts != null) {
internalRequest.setConflicts(conflicts);
modified = true;
}
@SuppressWarnings("unchecked")
Map<String, Object> script = (Map<String, Object>) body.v2().remove("script");
if (script != null) {
internalRequest.setScript(Script.parse(script, false, parseFieldMatcher));
modified = true;
}
if (modified) {
XContentBuilder builder = XContentFactory.contentBuilder(body.v1());
builder.map(body.v2());
bodyContent = builder.bytes();
}
}
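/*
 * As a sketch of the stripping above, a (hypothetical) body like
 * {"conflicts": "proceed", "script": {...}, "query": {...}} has its
 * "conflicts" and "script" entries consumed into the UpdateByQueryRequest,
 * and the remaining {"query": {...}} is rebuilt and handed to
 * parseSearchRequest below.
 */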
RestSearchAction.parseSearchRequest(internalRequest.getSearchRequest(), indicesQueriesRegistry, request,
parseFieldMatcher, aggParsers, bodyContent);
String conflicts = request.param("conflicts");
if (conflicts != null) {
internalRequest.setConflicts(conflicts);
}
parseCommon(internalRequest, request);
internalRequest.setSize(internalRequest.getSearchRequest().source().size());
internalRequest.getSearchRequest().source().size(request.paramAsInt("scroll_size", scrollSize));
execute(request, internalRequest, channel);
}
}

View File

@ -0,0 +1,273 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.AutoCreateIndex;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.mapper.internal.TTLFieldMapper;
import org.elasticsearch.index.mapper.internal.VersionFieldMapper;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.List;
import java.util.Objects;
import static java.util.Objects.requireNonNull;
import static org.elasticsearch.index.VersionType.INTERNAL;
public class TransportReindexAction extends HandledTransportAction<ReindexRequest, ReindexResponse> {
private final ClusterService clusterService;
private final ScriptService scriptService;
private final AutoCreateIndex autoCreateIndex;
private final Client client;
@Inject
public TransportReindexAction(Settings settings, ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, ClusterService clusterService, ScriptService scriptService,
AutoCreateIndex autoCreateIndex, Client client, TransportService transportService) {
super(settings, ReindexAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
ReindexRequest::new);
this.clusterService = clusterService;
this.scriptService = scriptService;
this.autoCreateIndex = autoCreateIndex;
this.client = client;
}
@Override
protected void doExecute(Task task, ReindexRequest request, ActionListener<ReindexResponse> listener) {
validateAgainstAliases(request.getSearchRequest(), request.getDestination(), indexNameExpressionResolver, autoCreateIndex,
clusterService.state());
new AsyncIndexBySearchAction((BulkByScrollTask) task, logger, scriptService, client, threadPool, request, listener).start();
}
@Override
protected void doExecute(ReindexRequest request, ActionListener<ReindexResponse> listener) {
throw new UnsupportedOperationException("task required");
}
/**
* Throws an ActionRequestValidationException if the request tries to index
* back into the same index or into an alias that points to more than one index.
* This cannot be done during request validation because the cluster state
* isn't available then. Package private for testing.
*/
static String validateAgainstAliases(SearchRequest source, IndexRequest destination,
IndexNameExpressionResolver indexNameExpressionResolver, AutoCreateIndex autoCreateIndex, ClusterState clusterState) {
String target = destination.index();
if (false == autoCreateIndex.shouldAutoCreate(target, clusterState)) {
/*
* If we're going to autocreate the index we don't need to resolve
* it. This is the same sort of dance that TransportIndexRequest
* uses to decide to autocreate the index.
*/
target = indexNameExpressionResolver.concreteIndices(clusterState, destination)[0];
}
for (String sourceIndex: indexNameExpressionResolver.concreteIndices(clusterState, source)) {
if (sourceIndex.equals(target)) {
ActionRequestValidationException e = new ActionRequestValidationException();
e.addValidationError("reindex cannot write into an index its reading from [" + target + ']');
throw e;
}
}
return target;
}
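/*
 * For example (names hypothetical): reindexing from "source" into "dest" is
 * fine so long as "dest" resolves to a single index other than "source";
 * reindexing from "source" back into "source", or into an alias that
 * resolves to "source", trips the validation error above.
 */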
/**
* Simple implementation of reindex using scrolling and bulk. There are tons
* of optimizations that can be done on certain types of reindex requests
* but this makes no attempt to do any of them so it can be as simple as
* possible.
*/
static class AsyncIndexBySearchAction extends AbstractAsyncBulkIndexByScrollAction<ReindexRequest, ReindexResponse> {
public AsyncIndexBySearchAction(BulkByScrollTask task, ESLogger logger, ScriptService scriptService, Client client,
ThreadPool threadPool, ReindexRequest request, ActionListener<ReindexResponse> listener) {
super(task, logger, scriptService, client, threadPool, request, request.getSearchRequest(), listener);
}
@Override
protected IndexRequest buildIndexRequest(SearchHit doc) {
IndexRequest index = new IndexRequest();
// Copy the index from the request so we always write where it asked to write
index.index(mainRequest.getDestination().index());
// If the request overrides the type then the user wants all documents indexed with that type. Otherwise keep the doc's type.
if (mainRequest.getDestination().type() == null) {
index.type(doc.type());
} else {
index.type(mainRequest.getDestination().type());
}
/*
* Internal versioning can just use what we copied from the destination request. Otherwise we assume we're using external
* versioning and use the doc's version.
*/
index.versionType(mainRequest.getDestination().versionType());
if (index.versionType() == INTERNAL) {
index.version(mainRequest.getDestination().version());
} else {
index.version(doc.version());
}
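/*
 * So, for example, a doc found at version 5 is indexed at version 5 when the
 * destination template asks for external versioning; with internal versioning
 * the destination's configured version (typically Versions.MATCH_ANY) is used
 * instead.
 */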
// id and source always come from the found doc. Scripts can change them but they operate on the index request.
index.id(doc.id());
index.source(doc.sourceRef());
/*
* The rest of the index request just has to be copied from the template. It may be changed later by scripts, and from here
* on out the superclass operates on the index request rather than the template.
*/
index.routing(mainRequest.getDestination().routing());
index.parent(mainRequest.getDestination().parent());
index.timestamp(mainRequest.getDestination().timestamp());
index.ttl(mainRequest.getDestination().ttl());
index.contentType(mainRequest.getDestination().getContentType());
// OpType is synthesized from version so it is handled when we copy version above.
return index;
}
/**
* Override the simple copy behavior to allow more fine grained control.
*/
@Override
protected void copyRouting(IndexRequest index, SearchHit doc) {
String routingSpec = mainRequest.getDestination().routing();
if (routingSpec == null) {
super.copyRouting(index, doc);
return;
}
if (routingSpec.startsWith("=")) {
index.routing(mainRequest.getDestination().routing().substring(1));
return;
}
switch (routingSpec) {
case "keep":
super.copyRouting(index, doc);
break;
case "discard":
index.routing(null);
break;
default:
throw new IllegalArgumentException("Unsupported routing command");
}
}
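/*
 * To illustrate the routing specs handled above (values hypothetical): "keep"
 * copies the doc's routing, "discard" drops it, and "=cat" sets the routing
 * to the literal string "cat".
 */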
@Override
protected ReindexResponse buildResponse(TimeValue took, List<Failure> indexingFailures, List<ShardSearchFailure> searchFailures) {
return new ReindexResponse(took, task.getStatus(), indexingFailures, searchFailures);
}
/*
* Methods below here handle scripts updating the index request. They try
* to be pretty liberal with regard to types because scripts are often
* dynamically typed.
*/
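/*
 * For example, a (hypothetical) reindex script that assigns
 * ctx._index = 'other' is routed through scriptChangedIndex below, which
 * copies the new value onto the index request.
 */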
@Override
protected void scriptChangedIndex(IndexRequest index, Object to) {
requireNonNull(to, "Can't reindex without a destination index!");
index.index(to.toString());
}
@Override
protected void scriptChangedType(IndexRequest index, Object to) {
requireNonNull(to, "Can't reindex without a destination type!");
index.type(to.toString());
}
@Override
protected void scriptChangedId(IndexRequest index, Object to) {
index.id(Objects.toString(to, null));
}
@Override
protected void scriptChangedVersion(IndexRequest index, Object to) {
if (to == null) {
index.version(Versions.MATCH_ANY).versionType(INTERNAL);
return;
}
index.version(asLong(to, VersionFieldMapper.NAME));
}
@Override
protected void scriptChangedParent(IndexRequest index, Object to) {
// Have to override the routing with the parent just in case it's changed
String routing = Objects.toString(to, null);
index.parent(routing).routing(routing);
}
@Override
protected void scriptChangedRouting(IndexRequest index, Object to) {
index.routing(Objects.toString(to, null));
}
@Override
protected void scriptChangedTimestamp(IndexRequest index, Object to) {
index.timestamp(Objects.toString(to, null));
}
@Override
protected void scriptChangedTTL(IndexRequest index, Object to) {
if (to == null) {
index.ttl((TimeValue) null);
return;
}
index.ttl(asLong(to, TTLFieldMapper.NAME));
}
private long asLong(Object from, String name) {
/*
* Stuffing a number into the map will have converted it to
* some Number.
*/
Number fromNumber;
try {
fromNumber = (Number) from;
} catch (ClassCastException e) {
throw new IllegalArgumentException(name + " may only be set to an int or a long but was [" + from + "]", e);
}
long l = fromNumber.longValue();
// Check that we didn't round when we fetched the value.
if (fromNumber.doubleValue() != l) {
throw new IllegalArgumentException(name + " may only be set to an int or a long but was [" + from + "]");
}
return l;
}
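/*
 * For example, a script that sets _ttl to 10 stores an Integer in the ctx
 * map and asLong widens it to 10L; setting it to 10.5 trips the rounding
 * check above and fails.
 */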
}
}

View File

@ -0,0 +1,142 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.mapper.internal.IdFieldMapper;
import org.elasticsearch.index.mapper.internal.IndexFieldMapper;
import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
import org.elasticsearch.index.mapper.internal.RoutingFieldMapper;
import org.elasticsearch.index.mapper.internal.TTLFieldMapper;
import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.List;
public class TransportUpdateByQueryAction extends HandledTransportAction<UpdateByQueryRequest, BulkIndexByScrollResponse> {
private final Client client;
private final ScriptService scriptService;
@Inject
public TransportUpdateByQueryAction(Settings settings, ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, Client client, TransportService transportService,
ScriptService scriptService) {
super(settings, UpdateByQueryAction.NAME, threadPool, transportService, actionFilters,
indexNameExpressionResolver, UpdateByQueryRequest::new);
this.client = client;
this.scriptService = scriptService;
}
@Override
protected void doExecute(Task task, UpdateByQueryRequest request,
ActionListener<BulkIndexByScrollResponse> listener) {
new AsyncIndexBySearchAction((BulkByScrollTask) task, logger, scriptService, client, threadPool, request, listener).start();
}
@Override
protected void doExecute(UpdateByQueryRequest request, ActionListener<BulkIndexByScrollResponse> listener) {
throw new UnsupportedOperationException("task required");
}
/**
* Simple implementation of update-by-query using scrolling and bulk.
*/
static class AsyncIndexBySearchAction extends AbstractAsyncBulkIndexByScrollAction<UpdateByQueryRequest, BulkIndexByScrollResponse> {
public AsyncIndexBySearchAction(BulkByScrollTask task, ESLogger logger, ScriptService scriptService, Client client,
ThreadPool threadPool, UpdateByQueryRequest request, ActionListener<BulkIndexByScrollResponse> listener) {
super(task, logger, scriptService, client, threadPool, request, request.getSearchRequest(), listener);
}
@Override
protected IndexRequest buildIndexRequest(SearchHit doc) {
IndexRequest index = new IndexRequest();
index.index(doc.index());
index.type(doc.type());
index.id(doc.id());
index.source(doc.sourceRef());
index.versionType(VersionType.INTERNAL);
index.version(doc.version());
return index;
}
@Override
protected BulkIndexByScrollResponse buildResponse(TimeValue took, List<Failure> indexingFailures,
List<ShardSearchFailure> searchFailures) {
return new BulkIndexByScrollResponse(took, task.getStatus(), indexingFailures, searchFailures);
}
@Override
protected void scriptChangedIndex(IndexRequest index, Object to) {
throw new IllegalArgumentException("Modifying [" + IndexFieldMapper.NAME + "] not allowed");
}
@Override
protected void scriptChangedType(IndexRequest index, Object to) {
throw new IllegalArgumentException("Modifying [" + TypeFieldMapper.NAME + "] not allowed");
}
@Override
protected void scriptChangedId(IndexRequest index, Object to) {
throw new IllegalArgumentException("Modifying [" + IdFieldMapper.NAME + "] not allowed");
}
@Override
protected void scriptChangedVersion(IndexRequest index, Object to) {
throw new IllegalArgumentException("Modifying [_version] not allowed");
}
@Override
protected void scriptChangedRouting(IndexRequest index, Object to) {
throw new IllegalArgumentException("Modifying [" + RoutingFieldMapper.NAME + "] not allowed");
}
@Override
protected void scriptChangedParent(IndexRequest index, Object to) {
throw new IllegalArgumentException("Modifying [" + ParentFieldMapper.NAME + "] not allowed");
}
@Override
protected void scriptChangedTimestamp(IndexRequest index, Object to) {
throw new IllegalArgumentException("Modifying [" + TimestampFieldMapper.NAME + "] not allowed");
}
@Override
protected void scriptChangedTTL(IndexRequest index, Object to) {
throw new IllegalArgumentException("Modifying [" + TTLFieldMapper.NAME + "] not allowed");
}
}
}

View File

@ -0,0 +1,43 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.action.Action;
import org.elasticsearch.client.ElasticsearchClient;
public class UpdateByQueryAction extends
Action<UpdateByQueryRequest, BulkIndexByScrollResponse, UpdateByQueryRequestBuilder> {
public static final UpdateByQueryAction INSTANCE = new UpdateByQueryAction();
public static final String NAME = "indices:data/write/update/byquery";
private UpdateByQueryAction() {
super(NAME);
}
@Override
public UpdateByQueryRequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new UpdateByQueryRequestBuilder(client, this);
}
@Override
public BulkIndexByScrollResponse newResponse() {
return new BulkIndexByScrollResponse();
}
}

View File

@ -0,0 +1,48 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.action.search.SearchRequest;
/**
* Request to update a set of documents in place, without changing their
* locations or IDs.
*/
public class UpdateByQueryRequest extends AbstractBulkIndexByScrollRequest<UpdateByQueryRequest> {
public UpdateByQueryRequest() {
}
public UpdateByQueryRequest(SearchRequest search) {
super(search);
}
@Override
protected UpdateByQueryRequest self() {
return this;
}
@Override
public String toString() {
StringBuilder b = new StringBuilder();
b.append("update-by-query ");
searchToString(b);
return b.toString();
}
}

View File

@ -0,0 +1,51 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.search.SearchAction;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
public class UpdateByQueryRequestBuilder extends
AbstractBulkIndexByScrollRequestBuilder<UpdateByQueryRequest, BulkIndexByScrollResponse, UpdateByQueryRequestBuilder> {
public UpdateByQueryRequestBuilder(ElasticsearchClient client,
Action<UpdateByQueryRequest, BulkIndexByScrollResponse, UpdateByQueryRequestBuilder> action) {
this(client, action, new SearchRequestBuilder(client, SearchAction.INSTANCE));
}
private UpdateByQueryRequestBuilder(ElasticsearchClient client,
Action<UpdateByQueryRequest, BulkIndexByScrollResponse, UpdateByQueryRequestBuilder> action,
SearchRequestBuilder search) {
super(client, action, search, new UpdateByQueryRequest(search.request()));
}
@Override
protected UpdateByQueryRequestBuilder self() {
return this;
}
@Override
public UpdateByQueryRequestBuilder abortOnVersionConflict(boolean abortOnVersionConflict) {
request.setAbortOnVersionConflict(abortOnVersionConflict);
return this;
}
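/*
 * A minimal usage sketch (index name hypothetical):
 *
 *   UpdateByQueryRequestBuilder ubq = UpdateByQueryAction.INSTANCE.newRequestBuilder(client);
 *   ubq.source("test").abortOnVersionConflict(false);
 *   BulkIndexByScrollResponse response = ubq.get();
 */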
}

View File

@ -0,0 +1,68 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.Index;
import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.search.SearchHitField;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.internal.InternalSearchHit;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Consumer;
import static java.util.Collections.singletonMap;
import static org.hamcrest.Matchers.equalTo;
public abstract class AbstractAsyncBulkIndexByScrollActionScriptTestCase<
Request extends AbstractBulkIndexByScrollRequest<Request>,
Response extends BulkIndexByScrollResponse>
extends AbstractAsyncBulkIndexByScrollActionTestCase<Request, Response> {
protected IndexRequest applyScript(Consumer<Map<String, Object>> scriptBody) {
IndexRequest index = new IndexRequest("index", "type", "1").source(singletonMap("foo", "bar"));
Map<String, SearchHitField> fields = new HashMap<>();
InternalSearchHit doc = new InternalSearchHit(0, "id", new Text("type"), fields);
doc.shardTarget(new SearchShardTarget("nodeid", new Index("index", "uuid"), 1));
ExecutableScript script = new SimpleExecutableScript(scriptBody);
action().applyScript(index, doc, script, new HashMap<>());
return index;
}
public void testScriptAddingJunkToCtxIsError() {
try {
applyScript((Map<String, Object> ctx) -> ctx.put("junk", "junk"));
fail("Expected error");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), equalTo("Invalid fields added to ctx [junk]"));
}
}
public void testChangeSource() {
IndexRequest index = applyScript((Map<String, Object> ctx) -> {
@SuppressWarnings("unchecked")
Map<String, Object> source = (Map<String, Object>) ctx.get("_source");
source.put("bar", "cat");
});
assertEquals("cat", index.sourceAsMap().get("bar"));
}
}

View File

@ -0,0 +1,55 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.ThreadPool;
import org.junit.After;
import org.junit.Before;
public abstract class AbstractAsyncBulkIndexByScrollActionTestCase<
Request extends AbstractBulkIndexByScrollRequest<Request>,
Response extends BulkIndexByScrollResponse>
extends ESTestCase {
protected ThreadPool threadPool;
protected BulkByScrollTask task;
@Before
public void setupForTest() {
threadPool = new ThreadPool(getTestName());
task = new BulkByScrollTask(1, "test", "test", "test");
}
@After
@Override
public void tearDown() throws Exception {
super.tearDown();
threadPool.shutdown();
}
protected abstract AbstractAsyncBulkIndexByScrollAction<Request, Response> action();
protected abstract Request request();
protected PlainActionFuture<Response> listener() {
return new PlainActionFuture<>();
}
}

View File

@ -0,0 +1,61 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.mapper.internal.TTLFieldMapper;
import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.internal.InternalSearchHit;
import org.elasticsearch.search.internal.InternalSearchHitField;
import static java.util.Collections.singletonList;
import static java.util.Collections.singletonMap;
import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
public abstract class AbstractAsyncBulkIndexbyScrollActionMetadataTestCase<
Request extends AbstractBulkIndexByScrollRequest<Request>,
Response extends BulkIndexByScrollResponse>
extends AbstractAsyncBulkIndexByScrollActionTestCase<Request, Response> {
/**
* Create a doc with some metadata.
*/
protected InternalSearchHit doc(String field, Object value) {
InternalSearchHit doc = new InternalSearchHit(0, "id", new Text("type"), singletonMap(field,
new InternalSearchHitField(field, singletonList(value))));
doc.shardTarget(new SearchShardTarget("node", new Index("index", "uuid"), 0));
return doc;
}
public void testTimestampIsCopied() {
IndexRequest index = new IndexRequest();
action().copyMetadata(index, doc(TimestampFieldMapper.NAME, 10L));
assertEquals("10", index.timestamp());
}
public void testTTL() throws Exception {
IndexRequest index = new IndexRequest();
action().copyMetadata(index, doc(TTLFieldMapper.NAME, 10L));
assertEquals(timeValueMillis(10), index.ttl());
}
}

View File

@ -0,0 +1,122 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.hamcrest.Description;
import org.hamcrest.Matcher;
import org.hamcrest.TypeSafeMatcher;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.nullValue;
public abstract class AbstractBulkIndexByScrollResponseMatcher<
Response extends BulkIndexByScrollResponse,
Self extends AbstractBulkIndexByScrollResponseMatcher<Response, Self>>
extends TypeSafeMatcher<Response> {
private Matcher<Long> updatedMatcher = equalTo(0L);
/**
* Matcher for the number of batches. Optional.
*/
private Matcher<Integer> batchesMatcher;
private Matcher<Long> versionConflictsMatcher = equalTo(0L);
private Matcher<Integer> failuresMatcher = equalTo(0);
private Matcher<String> reasonCancelledMatcher = nullValue(String.class);
protected abstract Self self();
public Self updated(Matcher<Long> updatedMatcher) {
this.updatedMatcher = updatedMatcher;
return self();
}
public Self updated(long updated) {
return updated(equalTo(updated));
}
/**
* Set the matcher for the number of batches. Defaults to matching any
* integer because we usually don't care about how many batches the job
* takes.
*/
public Self batches(Matcher<Integer> batchesMatcher) {
this.batchesMatcher = batchesMatcher;
return self();
}
public Self batches(int batches) {
return batches(equalTo(batches));
}
public Self batches(int total, int batchSize) {
// Round up
return batches((total + batchSize - 1) / batchSize);
}
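/*
 * For example, batches(10, 3) expects (10 + 3 - 1) / 3 = 4 batches: three
 * full batches of three docs plus a final batch of one.
 */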
public Self versionConflicts(Matcher<Long> versionConflictsMatcher) {
this.versionConflictsMatcher = versionConflictsMatcher;
return self();
}
public Self versionConflicts(long versionConflicts) {
return versionConflicts(equalTo(versionConflicts));
}
/**
* Set the matcher for the size of the failures list. For more in-depth
* matching, do it by hand. The type signatures required to match the
* actual failures list here just don't work.
*/
public Self failures(Matcher<Integer> failuresMatcher) {
this.failuresMatcher = failuresMatcher;
return self();
}
/**
* Set the expected size of the failures list.
*/
public Self failures(int failures) {
return failures(equalTo(failures));
}
public Self reasonCancelled(Matcher<String> reasonCancelledMatcher) {
this.reasonCancelledMatcher = reasonCancelledMatcher;
return self();
}
@Override
protected boolean matchesSafely(Response item) {
return updatedMatcher.matches(item.getUpdated()) &&
(batchesMatcher == null || batchesMatcher.matches(item.getBatches())) &&
versionConflictsMatcher.matches(item.getVersionConflicts()) &&
failuresMatcher.matches(item.getIndexingFailures().size()) &&
reasonCancelledMatcher.matches(item.getReasonCancelled());
}
@Override
public void describeTo(Description description) {
description.appendText("indexed matches ").appendDescriptionOf(updatedMatcher);
if (batchesMatcher != null) {
description.appendText(" and batches matches ").appendDescriptionOf(batchesMatcher);
}
description.appendText(" and versionConflicts matches ").appendDescriptionOf(versionConflictsMatcher);
description.appendText(" and failures size matches ").appendDescriptionOf(failuresMatcher);
description.appendText(" and reason cancelled matches ").appendDescriptionOf(reasonCancelledMatcher);
}
}

View File

@ -0,0 +1,511 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.ClearScrollResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.FilterClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.internal.InternalSearchHit;
import org.elasticsearch.search.internal.InternalSearchHits;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.client.NoOpClient;
import org.elasticsearch.threadpool.ThreadPool;
import org.junit.After;
import org.junit.Before;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;
import static java.util.Collections.emptyList;
import static java.util.Collections.emptyMap;
import static org.apache.lucene.util.TestUtil.randomSimpleString;
import static org.elasticsearch.action.bulk.BackoffPolicy.constantBackoff;
import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.emptyCollectionOf;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.instanceOf;
public class AsyncBulkByScrollActionTests extends ESTestCase {
private MyMockClient client;
private ThreadPool threadPool;
private DummyAbstractBulkByScrollRequest mainRequest;
private SearchRequest firstSearchRequest;
private PlainActionFuture<BulkIndexByScrollResponse> listener;
private String scrollId;
private TaskManager taskManager;
private BulkByScrollTask task;
@Before
public void setupForTest() {
client = new MyMockClient(new NoOpClient(getTestName()));
threadPool = new ThreadPool(getTestName());
mainRequest = new DummyAbstractBulkByScrollRequest();
firstSearchRequest = null;
listener = new PlainActionFuture<>();
scrollId = null;
taskManager = new TaskManager(Settings.EMPTY);
task = (BulkByScrollTask) taskManager.register("don'tcare", "hereeither", mainRequest);
}
@After
public void tearDownAndVerifyCommonStuff() {
client.close();
threadPool.shutdown();
}
/**
* Generates a random scrollId and registers it so that when the test
* finishes we check that it was cleared. Subsequent calls reregister a new
* random scroll id so it is checked instead.
*/
private String scrollId() {
scrollId = randomSimpleString(random(), 1, 1000); // Empty strings get special behavior we don't want
return scrollId;
}
public void testScrollResponseSetsTotal() {
// Default is 0, meaning unstarted
assertEquals(0, task.getStatus().getTotal());
long total = randomIntBetween(0, Integer.MAX_VALUE);
InternalSearchHits hits = new InternalSearchHits(null, total, 0);
InternalSearchResponse searchResponse = new InternalSearchResponse(hits, null, null, null, false, false);
new DummyAbstractAsyncBulkByScrollAction()
.onScrollResponse(new SearchResponse(searchResponse, scrollId(), 5, 4, randomLong(), null));
assertEquals(total, task.getStatus().getTotal());
}
public void testEachScrollResponseIsABatch() {
// Replace the generic thread pool with one that executes immediately so the batch is updated immediately
threadPool.shutdown();
threadPool = new ThreadPool(getTestName()) {
@Override
public Executor generic() {
return new Executor() {
@Override
public void execute(Runnable command) {
command.run();
}
};
}
};
int maxBatches = randomIntBetween(0, 100);
for (int batches = 1; batches < maxBatches; batches++) {
InternalSearchHit hit = new InternalSearchHit(0, "id", new Text("type"), emptyMap());
InternalSearchHits hits = new InternalSearchHits(new InternalSearchHit[] { hit }, 0, 0);
InternalSearchResponse searchResponse = new InternalSearchResponse(hits, null, null, null, false, false);
new DummyAbstractAsyncBulkByScrollAction()
.onScrollResponse(new SearchResponse(searchResponse, scrollId(), 5, 4, randomLong(), null));
assertEquals(batches, task.getStatus().getBatches());
}
}
public void testBulkResponseSetsLotsOfStatus() {
mainRequest.setAbortOnVersionConflict(false);
int maxBatches = randomIntBetween(0, 100);
long versionConflicts = 0;
long created = 0;
long updated = 0;
long deleted = 0;
for (int batches = 0; batches < maxBatches; batches++) {
BulkItemResponse[] responses = new BulkItemResponse[randomIntBetween(0, 100)];
for (int i = 0; i < responses.length; i++) {
ShardId shardId = new ShardId(new Index("name", "uid"), 0);
String opType;
if (rarely()) {
opType = randomSimpleString(random());
versionConflicts++;
responses[i] = new BulkItemResponse(i, opType, new Failure(shardId.getIndexName(), "type", "id" + i,
new VersionConflictEngineException(shardId, "type", "id", "test")));
continue;
}
boolean createdResponse;
switch (randomIntBetween(0, 2)) {
case 0:
opType = randomFrom("index", "create");
createdResponse = true;
created++;
break;
case 1:
opType = randomFrom("index", "create");
createdResponse = false;
updated++;
break;
case 2:
opType = "delete";
createdResponse = false;
deleted++;
break;
default:
throw new RuntimeException("Bad scenario");
}
responses[i] = new BulkItemResponse(i, opType, new IndexResponse(shardId, "type", "id" + i, randomInt(), createdResponse));
}
new DummyAbstractAsyncBulkByScrollAction().onBulkResponse(new BulkResponse(responses, 0));
assertEquals(versionConflicts, task.getStatus().getVersionConflicts());
assertEquals(updated, task.getStatus().getUpdated());
assertEquals(created, task.getStatus().getCreated());
assertEquals(deleted, task.getStatus().getDeleted());
assertEquals(versionConflicts, task.getStatus().getVersionConflicts());
}
}
/**
* Mimics a ThreadPool rejecting execution of the task.
*/
public void testThreadPoolRejectionsAbortRequest() throws Exception {
threadPool.shutdown();
threadPool = new ThreadPool(getTestName()) {
@Override
public Executor generic() {
return new Executor() {
@Override
public void execute(Runnable command) {
((AbstractRunnable) command).onRejection(new EsRejectedExecutionException("test"));
}
};
}
};
InternalSearchHits hits = new InternalSearchHits(null, 0, 0);
InternalSearchResponse searchResponse = new InternalSearchResponse(hits, null, null, null, false, false);
new DummyAbstractAsyncBulkByScrollAction()
.onScrollResponse(new SearchResponse(searchResponse, scrollId(), 5, 4, randomLong(), null));
try {
listener.get();
fail("Expected a failure");
} catch (ExecutionException e) {
assertThat(e.getMessage(), equalTo("EsRejectedExecutionException[test]"));
}
assertThat(client.scrollsCleared, contains(scrollId));
}
/**
* Mimics shard search failures usually caused by the data node serving the
* scroll request going down.
*/
public void testShardFailuresAbortRequest() throws Exception {
ShardSearchFailure shardFailure = new ShardSearchFailure(new RuntimeException("test"));
new DummyAbstractAsyncBulkByScrollAction()
.onScrollResponse(new SearchResponse(null, scrollId(), 5, 4, randomLong(), new ShardSearchFailure[] { shardFailure }));
BulkIndexByScrollResponse response = listener.get();
assertThat(response.getIndexingFailures(), emptyCollectionOf(Failure.class));
assertThat(response.getSearchFailures(), contains(shardFailure));
assertNull(response.getReasonCancelled());
assertThat(client.scrollsCleared, contains(scrollId));
}
/**
* Mimics bulk indexing failures.
*/
public void testBulkFailuresAbortRequest() throws Exception {
Failure failure = new Failure("index", "type", "id", new RuntimeException("test"));
DummyAbstractAsyncBulkByScrollAction action = new DummyAbstractAsyncBulkByScrollAction();
action.onBulkResponse(new BulkResponse(new BulkItemResponse[] {new BulkItemResponse(0, "index", failure)}, randomLong()));
BulkIndexByScrollResponse response = listener.get();
assertThat(response.getIndexingFailures(), contains(failure));
assertThat(response.getSearchFailures(), emptyCollectionOf(ShardSearchFailure.class));
assertNull(response.getReasonCancelled());
}
/**
* Mimics script failures or general wrongness by implementers.
*/
public void testListenerReceiveBuildBulkExceptions() throws Exception {
DummyAbstractAsyncBulkByScrollAction action = new DummyAbstractAsyncBulkByScrollAction() {
@Override
protected BulkRequest buildBulk(Iterable<SearchHit> docs) {
throw new RuntimeException("surprise");
}
};
InternalSearchHit hit = new InternalSearchHit(0, "id", new Text("type"), emptyMap());
InternalSearchHits hits = new InternalSearchHits(new InternalSearchHit[] {hit}, 0, 0);
InternalSearchResponse searchResponse = new InternalSearchResponse(hits, null, null, null, false, false);
action.onScrollResponse(new SearchResponse(searchResponse, scrollId(), 5, 4, randomLong(), null));
try {
listener.get();
fail("Expected failure.");
} catch (ExecutionException e) {
assertThat(e.getCause(), instanceOf(RuntimeException.class));
assertThat(e.getCause().getMessage(), equalTo("surprise"));
}
}
/**
* Mimics bulk rejections. These should be retried and eventually succeed.
*/
public void testBulkRejectionsRetryWithEnoughRetries() throws Exception {
int bulksToTry = randomIntBetween(1, 10);
long retryAttempts = 0;
for (int i = 0; i < bulksToTry; i++) {
retryAttempts += retryTestCase(false);
assertEquals(retryAttempts, task.getStatus().getRetries());
}
}
/**
* Mimics bulk rejections. These should be retried but we fail anyway because we run out of retries.
*/
public void testBulkRejectionsRetryAndFailAnyway() throws Exception {
long retryAttempts = retryTestCase(true);
assertEquals(retryAttempts, task.getStatus().getRetries());
}
private long retryTestCase(boolean failWithRejection) throws Exception {
int totalFailures = randomIntBetween(1, mainRequest.getMaxRetries());
int size = randomIntBetween(1, 100);
int retryAttempts = totalFailures - (failWithRejection ? 1 : 0);
client.bulksToReject = client.bulksAttempts.get() + totalFailures;
/*
* When we get a successful bulk response we usually start the next scroll request, but let's just intercept that so we don't
* have to deal with it. We just wait for it to happen.
*/
CountDownLatch successLatch = new CountDownLatch(1);
DummyAbstractAsyncBulkByScrollAction action = new DummyAbstractAsyncBulkByScrollAction() {
@Override
BackoffPolicy backoffPolicy() {
// Force a backoff time of 0 to prevent sleeping
return constantBackoff(timeValueMillis(0), retryAttempts);
}
@Override
void startNextScroll() {
successLatch.countDown();
}
};
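/*
 * Accounting sketch: the mock client rejects the next totalFailures bulk
 * attempts. With failWithRejection the backoff policy above allows one fewer
 * retry than there are rejections, so the final attempt is rejected and
 * surfaces as an indexing failure; otherwise the retries cover every
 * rejection and the bulk eventually succeeds.
 */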
BulkRequest request = new BulkRequest();
for (int i = 0; i < size + 1; i++) {
request.add(new IndexRequest("index", "type", "id" + i));
}
action.sendBulkRequest(request);
if (failWithRejection) {
BulkIndexByScrollResponse response = listener.get();
assertThat(response.getIndexingFailures(), hasSize(1));
assertEquals(response.getIndexingFailures().get(0).getStatus(), RestStatus.TOO_MANY_REQUESTS);
assertThat(response.getSearchFailures(), emptyCollectionOf(ShardSearchFailure.class));
assertNull(response.getReasonCancelled());
} else {
successLatch.await(10, TimeUnit.SECONDS);
}
return retryAttempts;
}
/**
* The default retry time matches what we say it is in the javadoc for the request.
*/
public void testDefaultRetryTimes() {
Iterator<TimeValue> policy = new DummyAbstractAsyncBulkByScrollAction().backoffPolicy().iterator();
long millis = 0;
while (policy.hasNext()) {
millis += policy.next().millis();
}
/*
* This is the total number of milliseconds that a reindex made with the default settings will back off before attempting one final
* time. If that request is rejected then the whole process fails with a rejected exception.
*/
int defaultBackoffBeforeFailing = 59460;
assertEquals(defaultBackoffBeforeFailing, millis);
}
public void testCancelBeforeInitialSearch() throws Exception {
cancelTaskCase((DummyAbstractAsyncBulkByScrollAction action) -> action.initialSearch());
}
public void testCancelBeforeScrollResponse() throws Exception {
// We bail so early we don't need to pass in a halfway valid response.
cancelTaskCase((DummyAbstractAsyncBulkByScrollAction action) -> action.onScrollResponse(null));
}
public void testCancelBeforeSendBulkRequest() throws Exception {
// We bail so early we don't need to pass in a halfway valid request.
cancelTaskCase((DummyAbstractAsyncBulkByScrollAction action) -> action.sendBulkRequest(null));
}
public void testCancelBeforeOnBulkResponse() throws Exception {
// We bail so early we don't need to pass in a halfway valid response.
cancelTaskCase((DummyAbstractAsyncBulkByScrollAction action) -> action.onBulkResponse(null));
}
public void testCancelBeforeStartNextScroll() throws Exception {
cancelTaskCase((DummyAbstractAsyncBulkByScrollAction action) -> action.startNextScroll());
}
public void testCancelBeforeStartNormalTermination() throws Exception {
// Refresh or not doesn't matter - we don't try to refresh.
mainRequest.setRefresh(usually());
cancelTaskCase((DummyAbstractAsyncBulkByScrollAction action) -> action.startNormalTermination(emptyList(), emptyList()));
// This wouldn't return if we called refresh - the action would hang waiting for the refresh that we haven't mocked.
}
private void cancelTaskCase(Consumer<DummyAbstractAsyncBulkByScrollAction> testMe) throws Exception {
DummyAbstractAsyncBulkByScrollAction action = new DummyAbstractAsyncBulkByScrollAction();
boolean previousScrollSet = usually();
if (previousScrollSet) {
action.setScroll(scrollId());
}
String reason = randomSimpleString(random());
taskManager.cancel(task, reason, (Set<String> s) -> {});
testMe.accept(action);
assertEquals(reason, listener.get().getReasonCancelled());
if (previousScrollSet) {
// Canceled tasks always start to clear the scroll before they die.
assertThat(client.scrollsCleared, contains(scrollId));
}
}
private class DummyAbstractAsyncBulkByScrollAction
extends AbstractAsyncBulkByScrollAction<DummyAbstractBulkByScrollRequest, BulkIndexByScrollResponse> {
public DummyAbstractAsyncBulkByScrollAction() {
super(AsyncBulkByScrollActionTests.this.task, logger, client, threadPool,
AsyncBulkByScrollActionTests.this.mainRequest, firstSearchRequest, listener);
}
@Override
protected BulkRequest buildBulk(Iterable<SearchHit> docs) {
return new BulkRequest();
}
@Override
protected BulkIndexByScrollResponse buildResponse(TimeValue took, List<Failure> indexingFailures,
List<ShardSearchFailure> searchFailures) {
return new BulkIndexByScrollResponse(took, task.getStatus(), indexingFailures, searchFailures);
}
}
private static class DummyAbstractBulkByScrollRequest extends AbstractBulkByScrollRequest<DummyAbstractBulkByScrollRequest> {
@Override
protected DummyAbstractBulkByScrollRequest self() {
return this;
}
}
private static class MyMockClient extends FilterClient {
private final List<String> scrollsCleared = new ArrayList<>();
private final AtomicInteger bulksAttempts = new AtomicInteger();
private int bulksToReject = 0;
public MyMockClient(Client in) {
super(in);
}
@Override
@SuppressWarnings("unchecked")
protected <Request extends ActionRequest<Request>, Response extends ActionResponse,
RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(
Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
if (request instanceof ClearScrollRequest) {
ClearScrollRequest clearScroll = (ClearScrollRequest) request;
scrollsCleared.addAll(clearScroll.getScrollIds());
listener.onResponse((Response) new ClearScrollResponse(true, clearScroll.getScrollIds().size()));
return;
}
if (request instanceof BulkRequest) {
BulkRequest bulk = (BulkRequest) request;
int toReject;
if (bulksAttempts.incrementAndGet() > bulksToReject) {
toReject = -1;
} else {
toReject = randomIntBetween(0, bulk.requests().size() - 1);
}
BulkItemResponse[] responses = new BulkItemResponse[bulk.requests().size()];
for (int i = 0; i < bulk.requests().size(); i++) {
ActionRequest<?> item = bulk.requests().get(i);
String opType;
DocWriteResponse response;
ShardId shardId = new ShardId(new Index(((ReplicationRequest<?>) item).index(), "uuid"), 0);
if (item instanceof IndexRequest) {
IndexRequest index = (IndexRequest) item;
opType = index.opType().lowercase();
response = new IndexResponse(shardId, index.type(), index.id(), randomIntBetween(0, Integer.MAX_VALUE),
true);
} else if (item instanceof UpdateRequest) {
UpdateRequest update = (UpdateRequest) item;
opType = "update";
response = new UpdateResponse(shardId, update.type(), update.id(),
randomIntBetween(0, Integer.MAX_VALUE), true);
} else if (item instanceof DeleteRequest) {
DeleteRequest delete = (DeleteRequest) item;
opType = "delete";
response = new DeleteResponse(shardId, delete.type(), delete.id(), randomIntBetween(0, Integer.MAX_VALUE),
true);
} else {
throw new RuntimeException("Unknown request: " + item);
}
if (i == toReject) {
responses[i] = new BulkItemResponse(i, opType,
new Failure(response.getIndex(), response.getType(), response.getId(), new EsRejectedExecutionException()));
} else {
responses[i] = new BulkItemResponse(i, opType, response);
}
}
listener.onResponse((Response) new BulkResponse(responses, 1));
return;
}
super.doExecute(action, request, listener);
}
}
}

View File

@ -0,0 +1,113 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.test.ESTestCase;
import org.junit.Before;
public class BulkByScrollTaskTests extends ESTestCase {
private BulkByScrollTask task;
@Before
public void createTask() {
task = new BulkByScrollTask(1, "test_type", "test_action", "test");
}
public void testBasicData() {
assertEquals(1, task.getId());
assertEquals("test_type", task.getType());
assertEquals("test_action", task.getAction());
}
public void testProgress() {
long created = 0;
long updated = 0;
long deleted = 0;
long versionConflicts = 0;
long noops = 0;
int batch = 0;
BulkByScrollTask.Status status = task.getStatus();
assertEquals(0, status.getTotal());
assertEquals(created, status.getCreated());
assertEquals(updated, status.getUpdated());
assertEquals(deleted, status.getDeleted());
assertEquals(versionConflicts, status.getVersionConflicts());
assertEquals(batch, status.getBatches());
assertEquals(noops, status.getNoops());
long totalHits = randomIntBetween(10, 1000);
task.setTotal(totalHits);
for (long p = 0; p < totalHits; p++) {
status = task.getStatus();
assertEquals(totalHits, status.getTotal());
assertEquals(created, status.getCreated());
assertEquals(updated, status.getUpdated());
assertEquals(deleted, status.getDeleted());
assertEquals(versionConflicts, status.getVersionConflicts());
assertEquals(batch, status.getBatches());
assertEquals(noops, status.getNoops());
if (randomBoolean()) {
created++;
task.countCreated();
} else if (randomBoolean()) {
updated++;
task.countUpdated();
} else {
deleted++;
task.countDeleted();
}
if (rarely()) {
versionConflicts++;
task.countVersionConflict();
}
if (rarely()) {
batch++;
task.countBatch();
}
if (rarely()) {
noops++;
task.countNoop();
}
}
status = task.getStatus();
assertEquals(totalHits, status.getTotal());
assertEquals(created, status.getCreated());
assertEquals(updated, status.getUpdated());
assertEquals(deleted, status.getDeleted());
assertEquals(versionConflicts, status.getVersionConflicts());
assertEquals(batch, status.getBatches());
assertEquals(noops, status.getNoops());
}
public void testStatusHatesNegatives() {
expectThrows(IllegalArgumentException.class, () -> new BulkByScrollTask.Status(-1, 0, 0, 0, 0, 0, 0, 0, null));
expectThrows(IllegalArgumentException.class, () -> new BulkByScrollTask.Status(0, -1, 0, 0, 0, 0, 0, 0, null));
expectThrows(IllegalArgumentException.class, () -> new BulkByScrollTask.Status(0, 0, -1, 0, 0, 0, 0, 0, null));
expectThrows(IllegalArgumentException.class, () -> new BulkByScrollTask.Status(0, 0, 0, -1, 0, 0, 0, 0, null));
expectThrows(IllegalArgumentException.class, () -> new BulkByScrollTask.Status(0, 0, 0, 0, -1, 0, 0, 0, null));
expectThrows(IllegalArgumentException.class, () -> new BulkByScrollTask.Status(0, 0, 0, 0, 0, -1, 0, 0, null));
expectThrows(IllegalArgumentException.class, () -> new BulkByScrollTask.Status(0, 0, 0, 0, 0, 0, -1, 0, null));
expectThrows(IllegalArgumentException.class, () -> new BulkByScrollTask.Status(0, 0, 0, 0, 0, 0, 0, -1, null));
}
}

View File

@ -0,0 +1,146 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.ListenableActionFuture;
import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskInfo;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.script.NativeScriptFactory;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptModule;
import org.elasticsearch.script.ScriptService.ScriptType;
import org.elasticsearch.test.ESIntegTestCase;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import static java.util.Collections.emptyMap;
import static org.elasticsearch.test.ESIntegTestCase.client;
import static org.hamcrest.Matchers.hasSize;
import static org.junit.Assert.assertThat;
/**
 * Utilities for testing reindex and update-by-query cancellation. This whole class isn't thread safe. Luckily we run our tests in
 * separate JVMs.
*/
public class CancelTestUtils {
public static Collection<Class<? extends Plugin>> nodePlugins() {
return Arrays.asList(ReindexPlugin.class, StickyScriptPlugin.class);
}
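/**
 * Two-party barrier shared between the test thread and the "sticky" script below; each await in
 * testCancel is matched by one await inside the script's run method.
 */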
private static final CyclicBarrier barrier = new CyclicBarrier(2);
public static <Request extends AbstractBulkIndexByScrollRequest<Request>,
Response extends ActionResponse,
Builder extends AbstractBulkIndexByScrollRequestBuilder<Request, Response, Builder>>
Response testCancel(ESIntegTestCase test, Builder request, String actionToCancel) throws Exception {
test.indexRandom(true, client().prepareIndex("source", "test", "1").setSource("foo", "a"),
client().prepareIndex("source", "test", "2").setSource("foo", "a"));
request.source("source").script(new Script("sticky", ScriptType.INLINE, "native", emptyMap()));
request.source().setSize(1);
ListenableActionFuture<Response> response = request.execute();
// Wait until the script is on the first document.
barrier.await(30, TimeUnit.SECONDS);
// Let just one document through.
barrier.await(30, TimeUnit.SECONDS);
// Wait until the script is on the second document.
barrier.await(30, TimeUnit.SECONDS);
// Cancel the request while the script is blocked. This will prevent the next bulk request from being sent at all.
List<TaskInfo> cancelledTasks = client().admin().cluster().prepareCancelTasks().setActions(actionToCancel).get().getTasks();
assertThat(cancelledTasks, hasSize(1));
// Now let the next document through. It won't be sent because the request is cancelled but we need to unblock the script.
barrier.await();
// Now we can just wait on the request and make sure it was actually cancelled half way through.
return response.get();
}
public static class StickyScriptPlugin extends Plugin {
@Override
public String name() {
return "sticky-script";
}
@Override
public String description() {
return "installs a script that \"sticks\" when it runs for testing reindex";
}
public void onModule(ScriptModule module) {
module.registerScript("sticky", StickyScriptFactory.class);
}
}
public static class StickyScriptFactory implements NativeScriptFactory {
@Override
public ExecutableScript newScript(Map<String, Object> params) {
return new ExecutableScript() {
private Map<String, Object> source;
@Override
@SuppressWarnings("unchecked") // Safe because _ctx always has this shape
public void setNextVar(String name, Object value) {
if ("ctx".equals(name)) {
Map<String, Object> ctx = (Map<String, Object>) value;
source = (Map<String, Object>) ctx.get("_source");
} else {
throw new IllegalArgumentException("Unexpected var: " + name);
}
}
@Override
public Object run() {
try {
// Tell the test we've started a document.
barrier.await(30, TimeUnit.SECONDS);
// Wait for the test to tell us to proceed.
barrier.await(30, TimeUnit.SECONDS);
// Make some change to the source so that update-by-query tests can make sure only one document was changed.
source.put("giraffes", "giraffes");
return null;
} catch (InterruptedException | BrokenBarrierException | TimeoutException e) {
throw new RuntimeException(e);
}
}
};
}
@Override
public boolean needsScores() {
return false;
}
}
}

View File

@ -0,0 +1,123 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
import org.elasticsearch.action.index.IndexRequestBuilder;
import java.util.ArrayList;
import java.util.List;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
public class ReindexBasicTests extends ReindexTestCase {
public void testFiltering() throws Exception {
indexRandom(true, client().prepareIndex("source", "test", "1").setSource("foo", "a"),
client().prepareIndex("source", "test", "2").setSource("foo", "a"),
client().prepareIndex("source", "test", "3").setSource("foo", "b"),
client().prepareIndex("source", "test", "4").setSource("foo", "c"));
assertHitCount(client().prepareSearch("source").setSize(0).get(), 4);
// Copy all the docs
ReindexRequestBuilder copy = reindex().source("source").destination("dest", "all").refresh(true);
assertThat(copy.get(), responseMatcher().created(4));
assertHitCount(client().prepareSearch("dest").setTypes("all").setSize(0).get(), 4);
// Now none of them
copy = reindex().source("source").destination("all", "none").filter(termQuery("foo", "no_match")).refresh(true);
assertThat(copy.get(), responseMatcher().created(0));
assertHitCount(client().prepareSearch("dest").setTypes("none").setSize(0).get(), 0);
// Now half of them
copy = reindex().source("source").destination("dest", "half").filter(termQuery("foo", "a")).refresh(true);
assertThat(copy.get(), responseMatcher().created(2));
assertHitCount(client().prepareSearch("dest").setTypes("half").setSize(0).get(), 2);
// Limit with size
copy = reindex().source("source").destination("dest", "size_one").size(1).refresh(true);
assertThat(copy.get(), responseMatcher().created(1));
assertHitCount(client().prepareSearch("dest").setTypes("size_one").setSize(0).get(), 1);
}
public void testCopyMany() throws Exception {
List<IndexRequestBuilder> docs = new ArrayList<>();
int max = between(150, 500);
for (int i = 0; i < max; i++) {
docs.add(client().prepareIndex("source", "test", Integer.toString(i)).setSource("foo", "a"));
}
indexRandom(true, docs);
assertHitCount(client().prepareSearch("source").setSize(0).get(), max);
// Copy all the docs
ReindexRequestBuilder copy = reindex().source("source").destination("dest", "all").refresh(true);
// Use a small batch size so we have to use more than one batch
copy.source().setSize(5);
assertThat(copy.get(), responseMatcher().created(max).batches(max, 5));
assertHitCount(client().prepareSearch("dest").setTypes("all").setSize(0).get(), max);
// Copy some of the docs
int half = max / 2;
copy = reindex().source("source").destination("dest", "half").refresh(true);
// Use a small batch size so we have to use more than one batch
copy.source().setSize(5);
copy.size(half); // The real "size" of the request.
assertThat(copy.get(), responseMatcher().created(half).batches(half, 5));
assertHitCount(client().prepareSearch("dest").setTypes("half").setSize(0).get(), half);
}
public void testRefreshIsFalseByDefault() throws Exception {
refreshTestCase(null, false);
}
public void testRefreshFalseDoesntMakeVisible() throws Exception {
refreshTestCase(false, false);
}
public void testRefreshTrueMakesVisible() throws Exception {
refreshTestCase(true, true);
}
/**
* Executes a reindex into an index with -1 refresh_interval and checks that
* the documents are visible properly.
*/
private void refreshTestCase(Boolean refresh, boolean visible) throws Exception {
CreateIndexRequestBuilder create = client().admin().indices().prepareCreate("dest").setSettings("refresh_interval", -1);
assertAcked(create);
ensureYellow();
indexRandom(true, client().prepareIndex("source", "test", "1").setSource("foo", "a"),
client().prepareIndex("source", "test", "2").setSource("foo", "a"),
client().prepareIndex("source", "test", "3").setSource("foo", "b"),
client().prepareIndex("source", "test", "4").setSource("foo", "c"));
assertHitCount(client().prepareSearch("source").setSize(0).get(), 4);
// Copy all the docs
ReindexRequestBuilder copy = reindex().source("source").destination("dest", "all");
if (refresh != null) {
copy.refresh(refresh);
}
assertThat(copy.get(), responseMatcher().created(4));
assertHitCount(client().prepareSearch("dest").setTypes("all").setSize(0).get(), visible ? 4 : 0);
}
}

View File

@ -0,0 +1,52 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.plugins.Plugin;
import java.util.Collection;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.equalTo;
/**
* Tests that you can actually cancel a reindex request and all the plumbing works. Doesn't test all of the different cancellation places -
 * that is the responsibility of {@link AsyncBulkByScrollActionTests}, which has more precise control to simulate failures but does not
 * exercise important portions of the stack such as transport and task management.
*/
public class ReindexCancelTests extends ReindexTestCase {
public void testCancel() throws Exception {
ReindexResponse response = CancelTestUtils.testCancel(this, reindex().destination("dest", "test"), ReindexAction.NAME);
assertThat(response, responseMatcher().created(1).reasonCancelled(equalTo("by user request")));
refresh("dest");
assertHitCount(client().prepareSearch("dest").setSize(0).get(), 1);
}
@Override
protected int numberOfShards() {
return 1;
}
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return CancelTestUtils.nodePlugins();
}
}

View File

@ -0,0 +1,147 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
import org.elasticsearch.action.index.IndexRequestBuilder;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import static org.elasticsearch.action.index.IndexRequest.OpType.CREATE;
import static org.hamcrest.Matchers.both;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.either;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
/**
* Tests failure capturing and abort-on-failure behavior of reindex.
*/
public class ReindexFailureTests extends ReindexTestCase {
public void testFailuresCauseAbortDefault() throws Exception {
/*
* Create the destination index such that the copy will cause a mapping
* conflict on every request.
*/
indexRandom(true,
client().prepareIndex("dest", "test", "test").setSource("test", 10) /* Its a string in the source! */);
indexDocs(100);
ReindexRequestBuilder copy = reindex().source("source").destination("dest");
/*
* Set the search size to something very small to cause there to be
* multiple batches for this request so we can assert that we abort on
* the first batch.
*/
copy.source().setSize(1);
ReindexResponse response = copy.get();
assertThat(response, responseMatcher()
.batches(1)
.failures(both(greaterThan(0)).and(lessThanOrEqualTo(maximumNumberOfShards()))));
for (Failure failure: response.getIndexingFailures()) {
assertThat(failure.getMessage(), containsString("NumberFormatException[For input string: \"words words\"]"));
}
}
public void testAbortOnVersionConflict() throws Exception {
// Just put something in the way of the copy.
indexRandom(true,
client().prepareIndex("dest", "test", "1").setSource("test", "test"));
indexDocs(100);
ReindexRequestBuilder copy = reindex().source("source").destination("dest").abortOnVersionConflict(true);
// CREATE will cause the conflict to prevent the write.
copy.destination().setOpType(CREATE);
ReindexResponse response = copy.get();
assertThat(response, responseMatcher().batches(1).versionConflicts(1).failures(1).created(99));
for (Failure failure: response.getIndexingFailures()) {
assertThat(failure.getMessage(), containsString("VersionConflictEngineException[[test]["));
}
}
/**
* Make sure that search failures get pushed back to the user as failures of
* the whole process. We do lose some information about how far along the
 * process got, but it's important that they see these failures.
*/
public void testResponseOnSearchFailure() throws Exception {
/*
* Attempt to trigger a reindex failure by deleting the source index out
* from under it.
*/
int attempt = 1;
while (attempt < 5) {
indexDocs(100);
ReindexRequestBuilder copy = reindex().source("source").destination("dest");
copy.source().setSize(10);
Future<ReindexResponse> response = copy.execute();
client().admin().indices().prepareDelete("source").get();
try {
response.get();
logger.info("Didn't trigger a reindex failure on the {} attempt", attempt);
attempt++;
} catch (ExecutionException e) {
logger.info("Triggered a reindex failure on the {} attempt", attempt);
assertThat(e.getMessage(), either(containsString("all shards failed")).or(containsString("No search context found")));
return;
}
}
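// Couldn't provoke the race; skip the test rather than fail it (assumeFalse with a true condition always skips).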
assumeFalse("Wasn't able to trigger a reindex failure in " + attempt + " attempts.", true);
}
public void testSettingTtlIsValidationFailure() throws Exception {
indexDocs(1);
ReindexRequestBuilder copy = reindex().source("source").destination("dest");
copy.destination().setTTL(123);
try {
copy.get();
fail("Expected a validation failure");
} catch (ActionRequestValidationException e) {
assertThat(e.getMessage(), containsString("setting ttl on destination isn't supported. use scripts instead."));
}
}
public void testSettingTimestampIsValidationFailure() throws Exception {
indexDocs(1);
ReindexRequestBuilder copy = reindex().source("source").destination("dest");
copy.destination().setTimestamp("now");
try {
copy.get();
fail("Expected a validation failure");
} catch (ActionRequestValidationException e) {
assertThat(e.getMessage(), containsString("setting timestamp on destination isn't supported. use scripts instead."));
}
}
private void indexDocs(int count) throws Exception {
List<IndexRequestBuilder> docs = new ArrayList<>(count);
for (int i = 0; i < count; i++) {
docs.add(client().prepareIndex("source", "test", Integer.toString(i)).setSource("test", "words words"));
}
indexRandom(true, docs);
}
}

View File

@ -0,0 +1,77 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.index.mapper.internal.RoutingFieldMapper;
/**
* Index-by-search test for ttl, timestamp, and routing.
*/
public class ReindexMetadataTests extends AbstractAsyncBulkIndexbyScrollActionMetadataTestCase<ReindexRequest, ReindexResponse> {
public void testRoutingCopiedByDefault() throws Exception {
IndexRequest index = new IndexRequest();
action().copyMetadata(index, doc(RoutingFieldMapper.NAME, "foo"));
assertEquals("foo", index.routing());
}
public void testRoutingCopiedIfRequested() throws Exception {
TransportReindexAction.AsyncIndexBySearchAction action = action();
action.mainRequest.getDestination().routing("keep");
IndexRequest index = new IndexRequest();
action.copyMetadata(index, doc(RoutingFieldMapper.NAME, "foo"));
assertEquals("foo", index.routing());
}
public void testRoutingDiscardedIfRequested() throws Exception {
TransportReindexAction.AsyncIndexBySearchAction action = action();
action.mainRequest.getDestination().routing("discard");
IndexRequest index = new IndexRequest();
action.copyMetadata(index, doc(RoutingFieldMapper.NAME, "foo"));
assertEquals(null, index.routing());
}
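// Destination routing accepts "keep", "discard", or "=<literal>": copy the source routing, drop it, or force a fixed value.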
public void testRoutingSetIfRequested() throws Exception {
TransportReindexAction.AsyncIndexBySearchAction action = action();
action.mainRequest.getDestination().routing("=cat");
IndexRequest index = new IndexRequest();
action.copyMetadata(index, doc(RoutingFieldMapper.NAME, "foo"));
assertEquals("cat", index.routing());
}
public void testRoutingSetIfWithDegenerateValue() throws Exception {
TransportReindexAction.AsyncIndexBySearchAction action = action();
action.mainRequest.getDestination().routing("==]");
IndexRequest index = new IndexRequest();
action.copyMetadata(index, doc(RoutingFieldMapper.NAME, "foo"));
assertEquals("=]", index.routing());
}
@Override
protected TransportReindexAction.AsyncIndexBySearchAction action() {
return new TransportReindexAction.AsyncIndexBySearchAction(task, logger, null, null, threadPool, request(), listener());
}
@Override
protected ReindexRequest request() {
return new ReindexRequest(new SearchRequest(), new IndexRequest());
}
}

View File

@ -0,0 +1,112 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import static org.elasticsearch.index.query.QueryBuilders.hasParentQuery;
import static org.elasticsearch.index.query.QueryBuilders.idsQuery;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;
import static org.hamcrest.Matchers.equalTo;
/**
* Index-by-search tests for parent/child.
*/
public class ReindexParentChildTests extends ReindexTestCase {
QueryBuilder<?> findsCountry;
QueryBuilder<?> findsCity;
QueryBuilder<?> findsNeighborhood;
public void testParentChild() throws Exception {
createParentChildIndex("source");
createParentChildIndex("dest");
createParentChildDocs("source");
// Copy parent to the new index
ReindexRequestBuilder copy = reindex().source("source").destination("dest").filter(findsCountry).refresh(true);
assertThat(copy.get(), responseMatcher().created(1));
// Copy the child to a new index
copy = reindex().source("source").destination("dest").filter(findsCity).refresh(true);
assertThat(copy.get(), responseMatcher().created(1));
// Make sure parent/child is intact on that index
assertSearchHits(client().prepareSearch("dest").setQuery(findsCity).get(), "pittsburgh");
// Copy the grandchild to a new index
copy = reindex().source("source").destination("dest").filter(findsNeighborhood).refresh(true);
assertThat(copy.get(), responseMatcher().created(1));
// Make sure parent/child is intact on that index
assertSearchHits(client().prepareSearch("dest").setQuery(findsNeighborhood).get(),
"make-believe");
// Copy the parent/child/grandchild structure all at once to a third index
createParentChildIndex("dest_all_at_once");
copy = reindex().source("source").destination("dest_all_at_once").refresh(true);
assertThat(copy.get(), responseMatcher().created(3));
// Make sure parent/child/grandchild is intact there too
assertSearchHits(client().prepareSearch("dest_all_at_once").setQuery(findsNeighborhood).get(),
"make-believe");
}
public void testErrorMessageWhenBadParentChild() throws Exception {
createParentChildIndex("source");
createParentChildDocs("source");
ReindexRequestBuilder copy = reindex().source("source").destination("dest").filter(findsCity);
try {
copy.get();
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), equalTo("Can't specify parent if no parent field has been configured"));
}
}
/**
 * Setup an index with parent/child mappings. The queries that exercise the
 * relationship are built later in createParentChildDocs.
*/
private void createParentChildIndex(String indexName) throws Exception {
CreateIndexRequestBuilder create = client().admin().indices().prepareCreate(indexName);
create.addMapping("city", "{\"_parent\": {\"type\": \"country\"}}");
create.addMapping("neighborhood", "{\"_parent\": {\"type\": \"city\"}}");
assertAcked(create);
ensureGreen();
}
private void createParentChildDocs(String indexName) throws Exception {
indexRandom(true, client().prepareIndex(indexName, "country", "united states").setSource("foo", "bar"),
client().prepareIndex(indexName, "city", "pittsburgh").setParent("united states").setSource("foo", "bar"),
client().prepareIndex(indexName, "neighborhood", "make-believe").setParent("pittsburgh")
.setSource("foo", "bar").setRouting("united states"));
findsCountry = idsQuery("country").addIds("united states");
findsCity = hasParentQuery("country", findsCountry);
findsNeighborhood = hasParentQuery("city", findsCity);
// Make sure we built the parent/child relationship
assertSearchHits(client().prepareSearch(indexName).setQuery(findsCity).get(), "pittsburgh");
assertSearchHits(client().prepareSearch(indexName).setQuery(findsNeighborhood).get(), "make-believe");
}
}

View File

@ -0,0 +1,40 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import com.carrotsearch.randomizedtesting.annotations.Name;
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.elasticsearch.test.rest.RestTestCandidate;
import org.elasticsearch.test.rest.parser.RestTestParseException;
import java.io.IOException;
public class ReindexRestIT extends ESRestTestCase {
public ReindexRestIT(@Name("yaml") RestTestCandidate testCandidate) {
super(testCandidate);
}
@ParametersFactory
public static Iterable<Object[]> parameters() throws IOException, RestTestParseException {
return ESRestTestCase.createParameters(0, 1);
}
}

View File

@ -0,0 +1,111 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.support.AutoCreateIndex;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.AliasMetaData;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESTestCase;
import static org.hamcrest.Matchers.containsString;
/**
* Tests that indexing from an index back into itself fails the request.
*/
public class ReindexSameIndexTests extends ESTestCase {
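// Fixture topology: "target_alias" points only at "target"; "target_multi" spans "target" and "target2";
// "source_multi" spans "source" and "source2".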
private static final ClusterState STATE = ClusterState.builder(new ClusterName("test")).metaData(MetaData.builder()
.put(index("target", "target_alias", "target_multi"), true)
.put(index("target2", "target_multi"), true)
.put(index("foo"), true)
.put(index("bar"), true)
.put(index("baz"), true)
.put(index("source", "source_multi"), true)
.put(index("source2", "source_multi"), true)).build();
private static final IndexNameExpressionResolver INDEX_NAME_EXPRESSION_RESOLVER = new IndexNameExpressionResolver(Settings.EMPTY);
private static final AutoCreateIndex AUTO_CREATE_INDEX = new AutoCreateIndex(Settings.EMPTY, INDEX_NAME_EXPRESSION_RESOLVER);
public void testObviousCases() throws Exception {
fails("target", "target");
fails("target", "foo", "bar", "target", "baz");
fails("target", "foo", "bar", "target", "baz", "target");
succeeds("target", "source");
succeeds("target", "source", "source2");
}
public void testAliasesContainTarget() throws Exception {
fails("target", "target_alias");
fails("target_alias", "target");
fails("target", "foo", "bar", "target_alias", "baz");
fails("target_alias", "foo", "bar", "target_alias", "baz");
fails("target_alias", "foo", "bar", "target", "baz");
fails("target", "foo", "bar", "target_alias", "target_alias");
fails("target", "target_multi");
fails("target", "foo", "bar", "target_multi", "baz");
succeeds("target", "source_multi");
succeeds("target", "source", "source2", "source_multi");
}
public void testTargetIsAlias() throws Exception {
try {
succeeds("target_multi", "foo");
fail("Expected failure");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), containsString("Alias [target_multi] has more than one indices associated with it [["));
// The index names can come in either order
assertThat(e.getMessage(), containsString("target"));
assertThat(e.getMessage(), containsString("target2"));
}
}
private void fails(String target, String... sources) throws Exception {
try {
succeeds(target, sources);
fail("Expected an exception");
} catch (ActionRequestValidationException e) {
assertThat(e.getMessage(),
containsString("reindex cannot write into an index its reading from [target]"));
}
}
private void succeeds(String target, String... sources) throws Exception {
TransportReindexAction.validateAgainstAliases(new SearchRequest(sources), new IndexRequest(target), INDEX_NAME_EXPRESSION_RESOLVER,
AUTO_CREATE_INDEX, STATE);
}
private static IndexMetaData index(String name, String... aliases) {
IndexMetaData.Builder builder = IndexMetaData.builder(name).settings(Settings.builder()
.put("index.version.created", Version.CURRENT.id)
.put("index.number_of_shards", 1)
.put("index.number_of_replicas", 1));
for (String alias: aliases) {
builder.putAlias(AliasMetaData.builder(alias).build());
}
return builder.build();
}
}

View File

@ -0,0 +1,139 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.lucene.uid.Versions;
import java.util.Map;
import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
import static org.hamcrest.Matchers.containsString;
/**
* Tests index-by-search with a script modifying the documents.
*/
public class ReindexScriptTests extends AbstractAsyncBulkIndexByScrollActionScriptTestCase<ReindexRequest, ReindexResponse> {
public void testSetIndex() throws Exception {
Object dest = randomFrom(new Object[] {234, 234L, "pancake"});
IndexRequest index = applyScript((Map<String, Object> ctx) -> ctx.put("_index", dest));
assertEquals(dest.toString(), index.index());
}
public void testSettingIndexToNullIsError() throws Exception {
try {
applyScript((Map<String, Object> ctx) -> ctx.put("_index", null));
fail("Expected a NullPointerException");
} catch (NullPointerException e) {
assertThat(e.getMessage(), containsString("Can't reindex without a destination index!"));
}
}
public void testSetType() throws Exception {
Object type = randomFrom(new Object[] {234, 234L, "pancake"});
IndexRequest index = applyScript((Map<String, Object> ctx) -> ctx.put("_type", type));
assertEquals(type.toString(), index.type());
}
public void testSettingTypeToNullIsError() throws Exception {
try {
applyScript((Map<String, Object> ctx) -> ctx.put("_type", null));
fail("Expected a NullPointerException");
} catch (NullPointerException e) {
assertThat(e.getMessage(), containsString("Can't reindex without a destination type!"));
}
}
public void testSetId() throws Exception {
Object id = randomFrom(new Object[] {null, 234, 234L, "pancake"});
IndexRequest index = applyScript((Map<String, Object> ctx) -> ctx.put("_id", id));
if (id == null) {
assertNull(index.id());
} else {
assertEquals(id.toString(), index.id());
}
}
public void testSetVersion() throws Exception {
Number version = randomFrom(new Number[] {null, 234, 234L});
IndexRequest index = applyScript((Map<String, Object> ctx) -> ctx.put("_version", version));
if (version == null) {
assertEquals(Versions.MATCH_ANY, index.version());
} else {
assertEquals(version.longValue(), index.version());
}
}
public void testSettingVersionToJunkIsAnError() throws Exception {
Object junkVersion = randomFrom(new Object[] { "junk", Math.PI });
try {
applyScript((Map<String, Object> ctx) -> ctx.put("_version", junkVersion));
fail("Expected an IllegalArgumentException");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), containsString("_version may only be set to an int or a long but was ["));
assertThat(e.getMessage(), containsString(junkVersion.toString()));
}
}
public void testSetParent() throws Exception {
String parent = randomRealisticUnicodeOfLengthBetween(5, 20);
IndexRequest index = applyScript((Map<String, Object> ctx) -> ctx.put("_parent", parent));
assertEquals(parent, index.parent());
}
public void testSetRouting() throws Exception {
String routing = randomRealisticUnicodeOfLengthBetween(5, 20);
IndexRequest index = applyScript((Map<String, Object> ctx) -> ctx.put("_routing", routing));
assertEquals(routing, index.routing());
}
public void testSetTimestamp() throws Exception {
String timestamp = randomFrom(null, "now", "1234");
IndexRequest index = applyScript((Map<String, Object> ctx) -> ctx.put("_timestamp", timestamp));
assertEquals(timestamp, index.timestamp());
}
public void testSetTtl() throws Exception {
Number ttl = randomFrom(new Number[] { null, 1233214, 134143797143L });
IndexRequest index = applyScript((Map<String, Object> ctx) -> ctx.put("_ttl", ttl));
if (ttl == null) {
assertEquals(null, index.ttl());
} else {
assertEquals(timeValueMillis(ttl.longValue()), index.ttl());
}
}
public void testSettingTtlToJunkIsAnError() throws Exception {
Object junkTtl = randomFrom(new Object[] { "junk", Math.PI });
try {
applyScript((Map<String, Object> ctx) -> ctx.put("_ttl", junkTtl));
fail("Expected an IllegalArgumentException");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), containsString("_ttl may only be set to an int or a long but was ["));
assertThat(e.getMessage(), containsString(junkTtl.toString()));
}
}
@Override
protected ReindexRequest request() {
return new ReindexRequest();
}
@Override
protected AbstractAsyncBulkIndexByScrollAction<ReindexRequest, ReindexResponse> action() {
return new TransportReindexAction.AsyncIndexBySearchAction(task, logger, null, null, threadPool, request(), listener());
}
}

View File

@ -0,0 +1,77 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.hamcrest.Description;
import org.hamcrest.Matcher;
import java.util.Collection;
import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE;
import static org.hamcrest.Matchers.equalTo;
@ClusterScope(scope = SUITE, transportClientRatio = 0)
public abstract class ReindexTestCase extends ESIntegTestCase {
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return pluginList(ReindexPlugin.class);
}
protected ReindexRequestBuilder reindex() {
return ReindexAction.INSTANCE.newRequestBuilder(client());
}
public IndexBySearchResponseMatcher responseMatcher() {
return new IndexBySearchResponseMatcher();
}
public static class IndexBySearchResponseMatcher
extends AbstractBulkIndexByScrollResponseMatcher<ReindexResponse, IndexBySearchResponseMatcher> {
private Matcher<Long> createdMatcher = equalTo(0L);
public IndexBySearchResponseMatcher created(Matcher<Long> createdMatcher) {
this.createdMatcher = createdMatcher;
return this;
}
public IndexBySearchResponseMatcher created(long created) {
return created(equalTo(created));
}
@Override
protected boolean matchesSafely(ReindexResponse item) {
return super.matchesSafely(item) && createdMatcher.matches(item.getCreated());
}
@Override
public void describeTo(Description description) {
super.describeTo(description);
description.appendText(" and created matches ").appendDescriptionOf(createdMatcher);
}
@Override
protected IndexBySearchResponseMatcher self() {
return this;
}
}
}

View File

@ -0,0 +1,143 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.action.get.GetResponse;
import static org.elasticsearch.action.index.IndexRequest.OpType.CREATE;
import static org.elasticsearch.index.VersionType.EXTERNAL;
import static org.elasticsearch.index.VersionType.INTERNAL;
public class ReindexVersioningTests extends ReindexTestCase {
private static final int SOURCE_VERSION = 4;
private static final int OLDER_VERSION = 1;
private static final int NEWER_VERSION = 10;
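// In assertDest below, the expected "foo" value records which document won: "source" if the copy was applied,
// "dest" if the preexisting destination document survived.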
public void testExternalVersioningCreatesWhenAbsentAndSetsVersion() throws Exception {
setupSourceAbsent();
assertThat(reindexExternal(), responseMatcher().created(1));
assertDest("source", SOURCE_VERSION);
}
public void testExternalVersioningUpdatesOnOlderAndSetsVersion() throws Exception {
setupDestOlder();
assertThat(reindexExternal(), responseMatcher().updated(1));
assertDest("source", SOURCE_VERSION);
}
public void testExternalVersioningVersionConflictsOnNewer() throws Exception {
setupDestNewer();
assertThat(reindexExternal(), responseMatcher().versionConflicts(1));
assertDest("dest", NEWER_VERSION);
}
public void testInternalVersioningCreatesWhenAbsent() throws Exception {
setupSourceAbsent();
assertThat(reindexInternal(), responseMatcher().created(1));
assertDest("source", 1);
}
public void testInternalVersioningUpdatesOnOlder() throws Exception {
setupDestOlder();
assertThat(reindexInternal(), responseMatcher().updated(1));
assertDest("source", OLDER_VERSION + 1);
}
public void testInternalVersioningUpdatesOnNewer() throws Exception {
setupDestNewer();
assertThat(reindexInternal(), responseMatcher().updated(1));
assertDest("source", NEWER_VERSION + 1);
}
public void testCreateCreatesWhenAbsent() throws Exception {
setupSourceAbsent();
assertThat(reindexCreate(), responseMatcher().created(1));
assertDest("source", 1);
}
public void testCreateVersionConflictsOnOlder() throws Exception {
setupDestOlder();
assertThat(reindexCreate(), responseMatcher().versionConflicts(1));
assertDest("dest", OLDER_VERSION);
}
public void testCreateVersionConflictsOnNewer() throws Exception {
setupDestNewer();
assertThat(reindexCreate(), responseMatcher().versionConflicts(1));
assertDest("dest", NEWER_VERSION);
}
/**
 * Perform a reindex with EXTERNAL versioning, which only writes when the source version is newer than the destination's.
*/
private ReindexResponse reindexExternal() {
ReindexRequestBuilder reindex = reindex().source("source").destination("dest").abortOnVersionConflict(false);
reindex.destination().setVersionType(EXTERNAL);
return reindex.get();
}
/**
* Perform a reindex with INTERNAL versioning which has "overwrite" semantics.
*/
private ReindexResponse reindexInternal() {
ReindexRequestBuilder reindex = reindex().source("source").destination("dest").abortOnVersionConflict(false);
reindex.destination().setVersionType(INTERNAL);
return reindex.get();
}
/**
* Perform a reindex with CREATE OpType which has "create" semantics.
*/
private ReindexResponse reindexCreate() {
ReindexRequestBuilder reindex = reindex().source("source").destination("dest").abortOnVersionConflict(false);
reindex.destination().setOpType(CREATE);
return reindex.get();
}
private void setupSourceAbsent() throws Exception {
indexRandom(true, client().prepareIndex("source", "test", "test").setVersionType(EXTERNAL)
.setVersion(SOURCE_VERSION).setSource("foo", "source"));
assertEquals(SOURCE_VERSION, client().prepareGet("source", "test", "test").get().getVersion());
}
private void setupDest(int version) throws Exception {
setupSourceAbsent();
indexRandom(true, client().prepareIndex("dest", "test", "test").setVersionType(EXTERNAL)
.setVersion(version).setSource("foo", "dest"));
assertEquals(version, client().prepareGet("dest", "test", "test").get().getVersion());
}
private void setupDestOlder() throws Exception {
setupDest(OLDER_VERSION);
}
private void setupDestNewer() throws Exception {
setupDest(NEWER_VERSION);
}
private void assertDest(String fooValue, int version) {
GetResponse get = client().prepareGet("dest", "test", "test").get();
assertEquals(fooValue, get.getSource().get("foo"));
assertEquals(version, get.getVersion());
}
}

View File

@ -0,0 +1,198 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.action.WriteConsistencyLevel;
import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.Index;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService.ScriptType;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import java.util.List;
import static java.util.Collections.emptyList;
import static java.util.Collections.emptyMap;
import static java.util.Collections.singletonList;
import static org.apache.lucene.util.TestUtil.randomSimpleString;
import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
/**
* Round trip tests for all Streamable things declared in this plugin.
*/
public class RoundTripTests extends ESTestCase {
public void testReindexRequest() throws IOException {
ReindexRequest reindex = new ReindexRequest(new SearchRequest(), new IndexRequest());
randomRequest(reindex);
reindex.getDestination().version(randomFrom(Versions.MATCH_ANY, Versions.MATCH_DELETED, 12L, 1L, 123124L, 12L));
reindex.getDestination().index("test");
ReindexRequest tripped = new ReindexRequest();
roundTrip(reindex, tripped);
assertRequestEquals(reindex, tripped);
assertEquals(reindex.getDestination().version(), tripped.getDestination().version());
assertEquals(reindex.getDestination().index(), tripped.getDestination().index());
}
public void testUpdateByQueryRequest() throws IOException {
UpdateByQueryRequest update = new UpdateByQueryRequest(new SearchRequest());
randomRequest(update);
UpdateByQueryRequest tripped = new UpdateByQueryRequest();
roundTrip(update, tripped);
assertRequestEquals(update, tripped);
}
private void randomRequest(AbstractBulkIndexByScrollRequest<?> request) {
request.getSearchRequest().indices("test");
request.getSearchRequest().source().size(between(1, 1000));
request.setSize(random().nextBoolean() ? between(1, Integer.MAX_VALUE) : -1);
request.setAbortOnVersionConflict(random().nextBoolean());
request.setRefresh(rarely());
request.setTimeout(TimeValue.parseTimeValue(randomTimeValue(), null, "test"));
request.setConsistency(randomFrom(WriteConsistencyLevel.values()));
request.setScript(random().nextBoolean() ? null : randomScript());
}
private void assertRequestEquals(AbstractBulkIndexByScrollRequest<?> request,
AbstractBulkIndexByScrollRequest<?> tripped) {
assertArrayEquals(request.getSearchRequest().indices(), tripped.getSearchRequest().indices());
assertEquals(request.getSearchRequest().source().size(), tripped.getSearchRequest().source().size());
assertEquals(request.isAbortOnVersionConflict(), tripped.isAbortOnVersionConflict());
assertEquals(request.isRefresh(), tripped.isRefresh());
assertEquals(request.getTimeout(), tripped.getTimeout());
assertEquals(request.getConsistency(), tripped.getConsistency());
assertEquals(request.getScript(), tripped.getScript());
assertEquals(request.getRetryBackoffInitialTime(), tripped.getRetryBackoffInitialTime());
assertEquals(request.getMaxRetries(), tripped.getMaxRetries());
}
public void testBulkByTaskStatus() throws IOException {
BulkByScrollTask.Status status = randomStatus();
BytesStreamOutput out = new BytesStreamOutput();
status.writeTo(out);
BulkByScrollTask.Status tripped = new BulkByScrollTask.Status(out.bytes().streamInput());
assertTaskStatusEquals(status, tripped);
}
public void testReindexResponse() throws IOException {
ReindexResponse response = new ReindexResponse(timeValueMillis(randomPositiveLong()), randomStatus(), randomIndexingFailures(),
randomSearchFailures());
ReindexResponse tripped = new ReindexResponse();
roundTrip(response, tripped);
assertResponseEquals(response, tripped);
}
public void testBulkIndexByScrollResponse() throws IOException {
BulkIndexByScrollResponse response = new BulkIndexByScrollResponse(timeValueMillis(randomPositiveLong()), randomStatus(),
randomIndexingFailures(), randomSearchFailures());
BulkIndexByScrollResponse tripped = new BulkIndexByScrollResponse();
roundTrip(response, tripped);
assertResponseEquals(response, tripped);
}
private BulkByScrollTask.Status randomStatus() {
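// Argument order mirrors the Status constructor (assumed): total, updated, created, deleted,
// batches (an int), versionConflicts, noops, retries, then an optional cancellation reason.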
return new BulkByScrollTask.Status(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
randomPositiveInt(), randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
random().nextBoolean() ? null : randomSimpleString(random()));
}
private List<Failure> randomIndexingFailures() {
return usually() ? emptyList()
: singletonList(new Failure(randomSimpleString(random()), randomSimpleString(random()),
randomSimpleString(random()), new IllegalArgumentException("test")));
}
private List<ShardSearchFailure> randomSearchFailures() {
if (usually()) {
return emptyList();
}
Index index = new Index(randomSimpleString(random()), "uuid");
return singletonList(new ShardSearchFailure(randomSimpleString(random()),
new SearchShardTarget(randomSimpleString(random()), index, randomInt()), randomFrom(RestStatus.values())));
}
private void roundTrip(Streamable example, Streamable empty) throws IOException {
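// Serialize the populated instance and read it back into the empty one, exercising writeTo and readFrom together.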
BytesStreamOutput out = new BytesStreamOutput();
example.writeTo(out);
empty.readFrom(out.bytes().streamInput());
}
private Script randomScript() {
return new Script(randomSimpleString(random()), // Name
randomFrom(ScriptType.values()), // Type
random().nextBoolean() ? null : randomSimpleString(random()), // Language
emptyMap()); // Params
}
private long randomPositiveLong() {
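// Retry on negatives instead of negating: Math.abs(Long.MIN_VALUE) overflows and stays negative.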
long l;
do {
l = randomLong();
} while (l < 0);
return l;
}
private int randomPositiveInt() {
return randomInt(Integer.MAX_VALUE);
}
private void assertResponseEquals(BulkIndexByScrollResponse expected, BulkIndexByScrollResponse actual) {
assertEquals(expected.getTook(), actual.getTook());
assertTaskStatusEquals(expected.getStatus(), actual.getStatus());
assertEquals(expected.getIndexingFailures().size(), actual.getIndexingFailures().size());
for (int i = 0; i < expected.getIndexingFailures().size(); i++) {
Failure expectedFailure = expected.getIndexingFailures().get(i);
Failure actualFailure = actual.getIndexingFailures().get(i);
assertEquals(expectedFailure.getIndex(), actualFailure.getIndex());
assertEquals(expectedFailure.getType(), actualFailure.getType());
assertEquals(expectedFailure.getId(), actualFailure.getId());
assertEquals(expectedFailure.getMessage(), actualFailure.getMessage());
assertEquals(expectedFailure.getStatus(), actualFailure.getStatus());
}
assertEquals(expected.getSearchFailures().size(), actual.getSearchFailures().size());
for (int i = 0; i < expected.getSearchFailures().size(); i++) {
ShardSearchFailure expectedFailure = expected.getSearchFailures().get(i);
ShardSearchFailure actualFailure = actual.getSearchFailures().get(i);
assertEquals(expectedFailure.shard(), actualFailure.shard());
assertEquals(expectedFailure.status(), actualFailure.status());
// We can't use getCause because throwable doesn't implement equals
assertEquals(expectedFailure.reason(), actualFailure.reason());
}
}
private void assertTaskStatusEquals(BulkByScrollTask.Status expected, BulkByScrollTask.Status actual) {
assertEquals(expected.getUpdated(), actual.getUpdated());
assertEquals(expected.getCreated(), actual.getCreated());
assertEquals(expected.getDeleted(), actual.getDeleted());
assertEquals(expected.getBatches(), actual.getBatches());
assertEquals(expected.getVersionConflicts(), actual.getVersionConflicts());
assertEquals(expected.getNoops(), actual.getNoops());
assertEquals(expected.getRetries(), actual.getRetries());
}
}

View File

@ -0,0 +1,55 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.script.ExecutableScript;
import java.util.Map;
import java.util.function.Consumer;
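/**
 * Test helper that adapts a {@link Consumer} over the script's ctx map into an {@link ExecutableScript}.
 */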
public class SimpleExecutableScript implements ExecutableScript {
private final Consumer<Map<String, Object>> script;
private Map<String, Object> ctx;
public SimpleExecutableScript(Consumer<Map<String, Object>> script) {
this.script = script;
}
@Override
public Object run() {
script.accept(ctx);
return null;
}
@Override
@SuppressWarnings("unchecked")
public void setNextVar(String name, Object value) {
if ("ctx".equals(name)) {
ctx = (Map<String, Object>) value;
} else {
throw new IllegalArgumentException("Unsupported var [" + name + "]");
}
}
@Override
public Object unwrap(Object value) {
return value;
}
}

View File

@ -0,0 +1,107 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
import org.elasticsearch.search.sort.SortOrder;
import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
public class UpdateByQueryBasicTests extends UpdateByQueryTestCase {
public void testBasics() throws Exception {
indexRandom(true, client().prepareIndex("test", "test", "1").setSource("foo", "a"),
client().prepareIndex("test", "test", "2").setSource("foo", "a"),
client().prepareIndex("test", "test", "3").setSource("foo", "b"),
client().prepareIndex("test", "test", "4").setSource("foo", "c"));
assertHitCount(client().prepareSearch("test").setTypes("test").setSize(0).get(), 4);
assertEquals(1, client().prepareGet("test", "test", "1").get().getVersion());
assertEquals(1, client().prepareGet("test", "test", "4").get().getVersion());
// Reindex all the docs
assertThat(request().source("test").refresh(true).get(), responseMatcher().updated(4));
assertEquals(2, client().prepareGet("test", "test", "1").get().getVersion());
assertEquals(2, client().prepareGet("test", "test", "4").get().getVersion());
// Now none of them
assertThat(request().source("test").filter(termQuery("foo", "no_match")).refresh(true).get(), responseMatcher().updated(0));
assertEquals(2, client().prepareGet("test", "test", "1").get().getVersion());
assertEquals(2, client().prepareGet("test", "test", "4").get().getVersion());
// Now half of them
assertThat(request().source("test").filter(termQuery("foo", "a")).refresh(true).get(), responseMatcher().updated(2));
assertEquals(3, client().prepareGet("test", "test", "1").get().getVersion());
assertEquals(3, client().prepareGet("test", "test", "2").get().getVersion());
assertEquals(2, client().prepareGet("test", "test", "3").get().getVersion());
assertEquals(2, client().prepareGet("test", "test", "4").get().getVersion());
// Limit with size
UpdateByQueryRequestBuilder request = request().source("test").size(3).refresh(true);
request.source().addSort("foo", SortOrder.ASC);
assertThat(request.get(), responseMatcher().updated(3));
// Only the first three documents are updated because of sort
assertEquals(4, client().prepareGet("test", "test", "1").get().getVersion());
assertEquals(4, client().prepareGet("test", "test", "2").get().getVersion());
assertEquals(3, client().prepareGet("test", "test", "3").get().getVersion());
assertEquals(2, client().prepareGet("test", "test", "4").get().getVersion());
}
public void testRefreshIsFalseByDefault() throws Exception {
refreshTestCase(null, false);
}
public void testRefreshFalseDoesntMakeVisible() throws Exception {
refreshTestCase(false, false);
}
public void testRefreshTrueMakesVisible() throws Exception {
refreshTestCase(true, true);
}
    /**
     * Executes an update_by_query on an index whose refresh_interval is -1 and
     * checks whether the updated documents become visible to search, as
     * controlled by the request's refresh parameter.
     */
    private void refreshTestCase(Boolean refresh, boolean visible) throws Exception {
        CreateIndexRequestBuilder create = client().admin().indices().prepareCreate("test").setSettings("refresh_interval", -1);
        create.addMapping("test", "{\"dynamic\": \"false\"}");
        assertAcked(create);
        ensureYellow();
        indexRandom(true, client().prepareIndex("test", "test", "1").setSource("foo", "a"),
                client().prepareIndex("test", "test", "2").setSource("foo", "a"),
                client().prepareIndex("test", "test", "3").setSource("foo", "b"),
                client().prepareIndex("test", "test", "4").setSource("foo", "c"));
        assertHitCount(client().prepareSearch("test").setQuery(matchQuery("foo", "a")).setSize(0).get(), 0);

        // Now make foo searchable
        assertAcked(client().admin().indices().preparePutMapping("test").setType("test")
                .setSource("{\"test\": {\"properties\":{\"foo\": {\"type\": \"string\"}}}}"));
        UpdateByQueryRequestBuilder update = request().source("test");
        if (refresh != null) {
            update.refresh(refresh);
        }
        assertThat(update.get(), responseMatcher().updated(4));
        assertHitCount(client().prepareSearch("test").setQuery(matchQuery("foo", "a")).setSize(0).get(), visible ? 2 : 0);
    }
}
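For reference, the client-side call pattern these tests exercise boils down to the minimal sketch below, assuming a test-cluster client with the ReindexPlugin installed. The getUpdated() accessor is an assumption on my part; the tests above only check counts through the responseMatcher() helper.

// Hypothetical usage sketch: bump every doc where foo == "a" and refresh so
// the new versions are immediately visible to a verification search.
UpdateByQueryRequestBuilder updateByQuery = UpdateByQueryAction.INSTANCE.newRequestBuilder(client());
BulkIndexByScrollResponse response = updateByQuery.source("test")
        .filter(termQuery("foo", "a"))
        .refresh(true)
        .get();
assertEquals(2, response.getUpdated()); // assumed accessor for the updated count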

View File

@ -0,0 +1,53 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;

import org.elasticsearch.plugins.Plugin;

import java.util.Collection;

import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.equalTo;
/**
 * Tests that you can actually cancel an update-by-query request and that all the plumbing works. It doesn't test all of the different
 * cancellation places - that is the responsibility of {@link AsyncBulkByScrollActionTests}, which has more precise control to simulate
 * failures but does not exercise important portions of the stack like transport and task management.
 */
public class UpdateByQueryCancelTests extends UpdateByQueryTestCase {
    public void testCancel() throws Exception {
        BulkIndexByScrollResponse response = CancelTestUtils.testCancel(this, request(), UpdateByQueryAction.NAME);

        assertThat(response, responseMatcher().updated(1).reasonCancelled(equalTo("by user request")));
        refresh("source");
        assertHitCount(client().prepareSearch("source").setSize(0).setQuery(matchQuery("giraffes", "giraffes")).get(), 1);
    }

    @Override
    protected int numberOfShards() {
        return 1;
    }

    @Override
    protected Collection<Class<? extends Plugin>> nodePlugins() {
        return CancelTestUtils.nodePlugins();
    }
}
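CancelTestUtils drives the cancellation itself; from a plain client the same operation would look roughly like the fragment below, going through the task-management API this cancellation support builds on. The exact builder methods here are an assumption, not code from this change.

// Rough sketch: locate the running update-by-query task and ask it to cancel.
ListTasksResponse tasks = client().admin().cluster().prepareListTasks()
        .setActions(UpdateByQueryAction.NAME).get();
for (TaskInfo task : tasks.getTasks()) {
    client().admin().cluster().prepareCancelTasks().setTaskId(task.getTaskId()).get();
}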

View File

@ -0,0 +1,43 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;

import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.index.mapper.internal.RoutingFieldMapper;

public class UpdateByQueryMetadataTests
        extends AbstractAsyncBulkIndexbyScrollActionMetadataTestCase<UpdateByQueryRequest, BulkIndexByScrollResponse> {
    public void testRoutingIsCopied() throws Exception {
        IndexRequest index = new IndexRequest();
        action().copyMetadata(index, doc(RoutingFieldMapper.NAME, "foo"));
        assertEquals("foo", index.routing());
    }

    @Override
    protected TransportUpdateByQueryAction.AsyncIndexBySearchAction action() {
        return new TransportUpdateByQueryAction.AsyncIndexBySearchAction(task, logger, null, null, threadPool, request(), listener());
    }

    @Override
    protected UpdateByQueryRequest request() {
        return new UpdateByQueryRequest(new SearchRequest());
    }
}
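Conceptually, the metadata copy asserted here is just "carry the scrolled hit's routing over to the write request". A sketch of that shape, assuming the SearchHit field API of this era - this is illustrative, not the actual TransportUpdateByQueryAction code:

// Sketch: if the scrolled hit carried a _routing value, preserve it on the
// index request that writes the updated document back.
void copyRouting(IndexRequest index, SearchHit doc) {
    SearchHitField routing = doc.field(RoutingFieldMapper.NAME);
    if (routing != null) {
        index.routing(routing.<String>value());
    }
}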

View File

@ -0,0 +1,52 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;

import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;

import java.util.Collection;

import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE;

@ClusterScope(scope = SUITE, transportClientRatio = 0)
public abstract class UpdateByQueryTestCase extends ESIntegTestCase {
    @Override
    protected Collection<Class<? extends Plugin>> nodePlugins() {
        return pluginList(ReindexPlugin.class);
    }

    protected UpdateByQueryRequestBuilder request() {
        return UpdateByQueryAction.INSTANCE.newRequestBuilder(client());
    }

    public BulkIndexbyScrollResponseMatcher responseMatcher() {
        return new BulkIndexbyScrollResponseMatcher();
    }

    public static class BulkIndexbyScrollResponseMatcher extends
            AbstractBulkIndexByScrollResponseMatcher<BulkIndexByScrollResponse, BulkIndexbyScrollResponseMatcher> {
        @Override
        protected BulkIndexbyScrollResponseMatcher self() {
            return this;
        }
    }
}
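The self() override is the standard recursive-generics ("self type") idiom: fluent methods declared on the abstract matcher return the concrete subclass, so chained calls keep their precise type. In miniature, with hypothetical names purely for illustration:

// Illustration of the self-type idiom used by the matcher above.
abstract class FluentBase<Self extends FluentBase<Self>> {
    protected abstract Self self();

    Self expectUpdated(long updated) {
        // ... record the expectation ...
        return self(); // callers keep the concrete subtype for chaining
    }
}

class ConcreteMatcher extends FluentBase<ConcreteMatcher> {
    @Override
    protected ConcreteMatcher self() {
        return this;
    }
}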

View File

@ -0,0 +1,99 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.reindex;

import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.index.engine.VersionConflictEngineException;

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;

import static org.apache.lucene.util.TestUtil.randomSimpleString;
import static org.hamcrest.Matchers.either;
import static org.hamcrest.Matchers.equalTo;
/**
 * Mutates a document while an update-by-query is running against it and asserts that the mutation
 * always sticks. Update-by-query should never revert a concurrent change to a document.
 */
public class UpdateByQueryWhileModifyingTests extends UpdateByQueryTestCase {
    private static final int MAX_MUTATIONS = 50;
    private static final int MAX_ATTEMPTS = 10;

    public void testUpdateWhileReindexing() throws Exception {
        AtomicReference<String> value = new AtomicReference<>(randomSimpleString(random()));
        indexRandom(true, client().prepareIndex("test", "test", "test").setSource("test", value.get()));

        AtomicReference<Throwable> failure = new AtomicReference<>();
        AtomicBoolean keepUpdating = new AtomicBoolean(true);
        Thread updater = new Thread(() -> {
            while (keepUpdating.get()) {
                try {
                    assertThat(request().source("test").refresh(true).abortOnVersionConflict(false).get(), responseMatcher()
                            .updated(either(equalTo(0L)).or(equalTo(1L))).versionConflicts(either(equalTo(0L)).or(equalTo(1L))));
                } catch (Throwable t) {
                    failure.set(t);
                }
            }
        });
        updater.start();

        try {
            for (int i = 0; i < MAX_MUTATIONS; i++) {
                GetResponse get = client().prepareGet("test", "test", "test").get();
                assertEquals(value.get(), get.getSource().get("test"));
                value.set(randomSimpleString(random()));
                IndexRequestBuilder index = client().prepareIndex("test", "test", "test").setSource("test", value.get())
                        .setRefresh(true);
                /*
                 * Update-by-query increments the version number, so concurrent
                 * index requests may hit version-conflict exceptions; we just
                 * blindly retry until one of them wins or we run out of attempts.
                 */
                int attempts = 0;
                while (true) {
                    attempts++;
                    try {
                        index.setVersion(get.getVersion()).get();
                        break;
                    } catch (VersionConflictEngineException e) {
                        if (attempts >= MAX_ATTEMPTS) {
                            throw new RuntimeException(
                                    "Failed to index after [" + MAX_ATTEMPTS + "] attempts. Too many version conflicts!");
                        }
                        logger.info(
                                "Caught expected version conflict trying to perform mutation number {} with version {}. Retrying.",
                                i, get.getVersion());
                        get = client().prepareGet("test", "test", "test").get();
                    }
                }
            }
        } finally {
            keepUpdating.set(false);
            updater.join(TimeUnit.SECONDS.toMillis(10));
            if (failure.get() != null) {
                throw new RuntimeException(failure.get());
            }
        }
    }
}
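The retry loop above is plain optimistic concurrency control: read a document, condition the write on the version you read, and re-read on conflict. Stripped to its essentials, using the same client API the test itself uses (the "new-value" source is a placeholder):

// Optimistic-concurrency write, as the test performs it in miniature.
GetResponse get = client().prepareGet("test", "test", "test").get();
try {
    client().prepareIndex("test", "test", "test")
            .setSource("test", "new-value")
            .setVersion(get.getVersion()) // fail if anything wrote in between
            .get();
} catch (VersionConflictEngineException e) {
    // Update-by-query bumped the version first; re-read and try again.
}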

Some files were not shown because too many files have changed in this diff