Merge pull request #16861 from nik9000/reindex_is_ready
Reindex required some parsing changes for search requests to support differing defaults from the regular search API.
commit c7c8bb357a

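The heart of the change is a new "parse on top of an existing request" entry point. Here is a minimal sketch of how a caller with non-standard defaults (like reindex) might use it — the surrounding `queryRegistry`, `request`, `parseFieldMatcher`, and `aggParsers` objects are assumed to exist in the caller, and the `size(100)` default is purely illustrative:

[source,java]
--------------------------------------------------
// Sketch only: composes the entry points added in the diff below.
SearchRequest searchRequest = new SearchRequest();
// Seed a default that differs from the regular search API.
searchRequest.source(new SearchSourceBuilder().size(100));
// Parse the rest request on top of the seeded request. Passing null for
// restContent means "read the body from the rest request itself".
RestSearchAction.parseSearchRequest(searchRequest, queryRegistry, request,
        parseFieldMatcher, aggParsers, null);
// Anything the rest request didn't specify (like size above) survives.
--------------------------------------------------
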
@@ -21,18 +21,16 @@ package org.elasticsearch.action;
 /**
  * A listener for action responses or failures.
- *
- *
  */
 public interface ActionListener<Response> {

     /**
-     * A response handler.
+     * Handle action response. This response may constitute a failure or a
+     * success but it is up to the listener to make that decision.
      */
     void onResponse(Response response);

     /**
-     * A failure handler.
+     * A failure caused by an exception at some phase of the task.
      */
     void onFailure(Throwable e);
 }
@@ -28,7 +28,9 @@ import org.elasticsearch.action.update.UpdateResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.StatusToXContent;
+import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentBuilderString;
 import org.elasticsearch.rest.RestStatus;
@@ -76,7 +78,15 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
     /**
      * Represents a failure.
      */
-    public static class Failure {
+    public static class Failure implements Writeable<Failure>, ToXContent {
+        static final String INDEX_FIELD = "index";
+        static final String TYPE_FIELD = "type";
+        static final String ID_FIELD = "id";
+        static final String CAUSE_FIELD = "cause";
+        static final String STATUS_FIELD = "status";
+
+        public static final Failure PROTOTYPE = new Failure(null, null, null, null);
+
         private final String index;
         private final String type;
         private final String id;
@@ -126,9 +136,39 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
             return this.status;
         }

         /**
          * The actual cause of the failure.
          */
         public Throwable getCause() {
             return cause;
         }
+
+        @Override
+        public Failure readFrom(StreamInput in) throws IOException {
+            return new Failure(in.readString(), in.readString(), in.readOptionalString(), in.readThrowable());
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            out.writeString(getIndex());
+            out.writeString(getType());
+            out.writeOptionalString(getId());
+            out.writeThrowable(getCause());
+        }
+
+        @Override
+        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+            builder.field(INDEX_FIELD, index);
+            builder.field(TYPE_FIELD, type);
+            if (id != null) {
+                builder.field(ID_FIELD, id);
+            }
+            builder.startObject(CAUSE_FIELD);
+            ElasticsearchException.toXContent(builder, params, cause);
+            builder.endObject();
+            builder.field(STATUS_FIELD, status.getStatus());
+            return builder;
+        }
     }

     private int id;
@@ -265,11 +305,7 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
         }

         if (in.readBoolean()) {
-            String fIndex = in.readString();
-            String fType = in.readString();
-            String fId = in.readOptionalString();
-            Throwable throwable = in.readThrowable();
-            failure = new Failure(fIndex, fType, fId, throwable);
+            failure = Failure.PROTOTYPE.readFrom(in);
         }
     }
@@ -294,10 +330,7 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
             out.writeBoolean(false);
         } else {
             out.writeBoolean(true);
-            out.writeString(failure.getIndex());
-            out.writeString(failure.getType());
-            out.writeOptionalString(failure.getId());
-            out.writeThrowable(failure.getCause());
+            failure.writeTo(out);
         }
     }
 }
@@ -94,6 +94,12 @@ public class BulkShardRequest extends ReplicationRequest<BulkShardRequest> {

     @Override
     public String toString() {
-        return "shard bulk {" + super.toString() + "}";
+        // This is included in error messages so we'll try to make it somewhat user friendly.
+        StringBuilder b = new StringBuilder("BulkShardRequest to [");
+        b.append(index).append("] containing [").append(items.length).append("] requests");
+        if (refresh) {
+            b.append(" and a refresh");
+        }
+        return b.toString();
     }
 }
@@ -38,7 +38,7 @@ import java.util.function.Predicate;
 /**
  * Encapsulates synchronous and asynchronous retry logic.
  */
-class Retry {
+public class Retry {
     private final Class<? extends Throwable> retryOnThrowable;

     private BackoffPolicy backoffPolicy;
@@ -223,6 +223,13 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
         return validationException;
     }

+    /**
+     * The content type that will be used when generating a document from user provided objects like Maps.
+     */
+    public XContentType getContentType() {
+        return contentType;
+    }
+
     /**
      * Sets the content type that will be used when generating a document from user provided objects (like Map).
      */
@@ -294,6 +301,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
         return this;
     }

+    @Override
     public String parent() {
         return this.parent;
     }
@@ -645,7 +653,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
     @Override
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
-        type = in.readString();
+        type = in.readOptionalString();
         id = in.readOptionalString();
         routing = in.readOptionalString();
         parent = in.readOptionalString();
@@ -663,7 +671,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
-        out.writeString(type);
+        out.writeOptionalString(type);
         out.writeOptionalString(id);
         out.writeOptionalString(routing);
         out.writeOptionalString(parent);
@@ -30,6 +30,7 @@ import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.tasks.Task;
+import org.elasticsearch.tasks.TaskListener;
 import org.elasticsearch.tasks.TaskManager;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -72,6 +73,13 @@ public abstract class TransportAction<Request extends ActionRequest<Request>, Re
      * This is a typical behavior.
      */
    public final Task execute(Request request, ActionListener<Response> listener) {
+        /*
+         * While this version of execute could delegate to the TaskListener
+         * version of execute that'd add yet another layer of wrapping on the
+         * listener and prevent us from using the listener bare if there isn't a
+         * task. That just seems like too many objects. Thus the two versions of
+         * this method.
+         */
         Task task = taskManager.register("transport", actionName, request);
         if (task == null) {
             execute(null, request, listener);
@@ -93,11 +101,32 @@ public abstract class TransportAction<Request extends ActionRequest<Request>, Re
         return task;
     }

+    public final Task execute(Request request, TaskListener<Response> listener) {
+        Task task = taskManager.register("transport", actionName, request);
+        execute(task, request, new ActionListener<Response>() {
+            @Override
+            public void onResponse(Response response) {
+                if (task != null) {
+                    taskManager.unregister(task);
+                }
+                listener.onResponse(task, response);
+            }
+
+            @Override
+            public void onFailure(Throwable e) {
+                if (task != null) {
+                    taskManager.unregister(task);
+                }
+                listener.onFailure(task, e);
+            }
+        });
+        return task;
+    }
+
     /**
      * Use this method when the transport action should continue to run in the context of the current task
      */
     public final void execute(Task task, Request request, ActionListener<Response> listener) {

         ActionRequestValidationException validationException = request.validate();
         if (validationException != null) {
             listener.onFailure(validationException);
@@ -87,21 +87,35 @@ public class RestSearchAction extends BaseRestHandler {

     @Override
     public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws IOException {
-        SearchRequest searchRequest;
-        searchRequest = RestSearchAction.parseSearchRequest(queryRegistry, request, parseFieldMatcher, aggParsers);
+        SearchRequest searchRequest = new SearchRequest();
+        RestSearchAction.parseSearchRequest(searchRequest, queryRegistry, request, parseFieldMatcher, aggParsers, null);
         client.search(searchRequest, new RestStatusToXContentListener<>(channel));
     }

-    public static SearchRequest parseSearchRequest(IndicesQueriesRegistry indicesQueriesRegistry, RestRequest request,
-            ParseFieldMatcher parseFieldMatcher, AggregatorParsers aggParsers) throws IOException {
-        String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
-        SearchRequest searchRequest = new SearchRequest(indices);
+    /**
+     * Parses the rest request on top of the SearchRequest, preserving values
+     * that are not overridden by the rest request.
+     *
+     * @param restContent
+     *            override body content to use for the request. If null body
+     *            content is read from the request using
+     *            RestAction.hasBodyContent.
+     */
+    public static void parseSearchRequest(SearchRequest searchRequest, IndicesQueriesRegistry indicesQueriesRegistry, RestRequest request,
+            ParseFieldMatcher parseFieldMatcher, AggregatorParsers aggParsers, BytesReference restContent) throws IOException {
+        if (searchRequest.source() == null) {
+            searchRequest.source(new SearchSourceBuilder());
+        }
+        searchRequest.indices(Strings.splitStringByCommaToArray(request.param("index")));
         // get the content, and put it in the body
         // add content/source as template if template flag is set
         boolean isTemplateRequest = request.path().endsWith("/template");
-        final SearchSourceBuilder builder;
+        if (restContent == null) {
             if (RestActions.hasBodyContent(request)) {
-                BytesReference restContent = RestActions.getRestContent(request);
+                restContent = RestActions.getRestContent(request);
             }
+        }
+        if (restContent != null) {
             QueryParseContext context = new QueryParseContext(indicesQueriesRegistry);
             if (isTemplateRequest) {
                 try (XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) {
@@ -110,12 +124,10 @@ public class RestSearchAction extends BaseRestHandler {
                     Template template = TemplateQueryParser.parse(parser, context.parseFieldMatcher(), "params", "template");
                     searchRequest.template(template);
                 }
-                builder = null;
             } else {
-                builder = RestActions.getRestSearchSource(restContent, indicesQueriesRegistry, parseFieldMatcher, aggParsers);
+                RestActions.parseRestSearchSource(searchRequest.source(), restContent, indicesQueriesRegistry, parseFieldMatcher,
+                        aggParsers);
             }
-        } else {
-            builder = null;
         }

         // do not allow 'query_and_fetch' or 'dfs_query_and_fetch' search types
@@ -128,15 +140,7 @@ public class RestSearchAction extends BaseRestHandler {
         } else {
             searchRequest.searchType(searchType);
         }
-        if (builder == null) {
-            SearchSourceBuilder extraBuilder = new SearchSourceBuilder();
-            if (parseSearchSource(extraBuilder, request)) {
-                searchRequest.source(extraBuilder);
-            }
-        } else {
-            parseSearchSource(builder, request);
-            searchRequest.source(builder);
-        }
+        parseSearchSource(searchRequest.source(), request);
         searchRequest.requestCache(request.paramAsBoolean("request_cache", null));

         String scroll = request.param("scroll");
@@ -148,41 +152,35 @@ public class RestSearchAction extends BaseRestHandler {
         searchRequest.routing(request.param("routing"));
         searchRequest.preference(request.param("preference"));
         searchRequest.indicesOptions(IndicesOptions.fromRequest(request, searchRequest.indicesOptions()));
-
-        return searchRequest;
     }

-    private static boolean parseSearchSource(final SearchSourceBuilder searchSourceBuilder, RestRequest request) {
-
-        boolean modified = false;
+    /**
+     * Parses the rest request on top of the SearchSourceBuilder, preserving
+     * values that are not overridden by the rest request.
+     */
+    private static void parseSearchSource(final SearchSourceBuilder searchSourceBuilder, RestRequest request) {
         QueryBuilder<?> queryBuilder = RestActions.urlParamsToQueryBuilder(request);
         if (queryBuilder != null) {
             searchSourceBuilder.query(queryBuilder);
-            modified = true;
         }

         int from = request.paramAsInt("from", -1);
         if (from != -1) {
             searchSourceBuilder.from(from);
-            modified = true;
         }
         int size = request.paramAsInt("size", -1);
         if (size != -1) {
             searchSourceBuilder.size(size);
-            modified = true;
         }

         if (request.hasParam("explain")) {
             searchSourceBuilder.explain(request.paramAsBoolean("explain", null));
-            modified = true;
         }
         if (request.hasParam("version")) {
             searchSourceBuilder.version(request.paramAsBoolean("version", null));
-            modified = true;
         }
         if (request.hasParam("timeout")) {
             searchSourceBuilder.timeout(request.paramAsTime("timeout", null));
-            modified = true;
         }
         if (request.hasParam("terminate_after")) {
             int terminateAfter = request.paramAsInt("terminate_after",
@@ -191,7 +189,6 @@ public class RestSearchAction extends BaseRestHandler {
                 throw new IllegalArgumentException("terminateAfter must be > 0");
             } else if (terminateAfter > 0) {
                 searchSourceBuilder.terminateAfter(terminateAfter);
-                modified = true;
             }
         }
@@ -199,13 +196,11 @@ public class RestSearchAction extends BaseRestHandler {
         if (sField != null) {
             if (!Strings.hasText(sField)) {
                 searchSourceBuilder.noFields();
-                modified = true;
             } else {
                 String[] sFields = Strings.splitStringByCommaToArray(sField);
                 if (sFields != null) {
                     for (String field : sFields) {
                         searchSourceBuilder.field(field);
-                        modified = true;
                     }
                 }
             }
@@ -217,7 +212,6 @@ public class RestSearchAction extends BaseRestHandler {
             if (sFields != null) {
                 for (String field : sFields) {
                     searchSourceBuilder.fieldDataField(field);
-                    modified = true;
                 }
             }
         }
@@ -225,12 +219,10 @@ public class RestSearchAction extends BaseRestHandler {
         FetchSourceContext fetchSourceContext = FetchSourceContext.parseFromRestRequest(request);
         if (fetchSourceContext != null) {
             searchSourceBuilder.fetchSource(fetchSourceContext);
-            modified = true;
         }

         if (request.hasParam("track_scores")) {
             searchSourceBuilder.trackScores(request.paramAsBoolean("track_scores", false));
-            modified = true;
         }

         String sSorts = request.param("sort");
@@ -243,14 +235,11 @@ public class RestSearchAction extends BaseRestHandler {
                 String reverse = sort.substring(delimiter + 1);
                 if ("asc".equals(reverse)) {
                     searchSourceBuilder.sort(sortField, SortOrder.ASC);
-                    modified = true;
                 } else if ("desc".equals(reverse)) {
                     searchSourceBuilder.sort(sortField, SortOrder.DESC);
-                    modified = true;
                 }
             } else {
                 searchSourceBuilder.sort(sort);
-                modified = true;
             }
         }
     }
@@ -258,7 +247,6 @@ public class RestSearchAction extends BaseRestHandler {
         String sStats = request.param("stats");
         if (sStats != null) {
             searchSourceBuilder.stats(Arrays.asList(Strings.splitStringByCommaToArray(sStats)));
-            modified = true;
         }

         String suggestField = request.param("suggest_field");
@@ -268,8 +256,6 @@ public class RestSearchAction extends BaseRestHandler {
             String suggestMode = request.param("suggest_mode");
             searchSourceBuilder.suggest(new SuggestBuilder().addSuggestion(
                     termSuggestion(suggestField).field(suggestField).text(suggestText).size(suggestSize).suggestMode(suggestMode)));
-            modified = true;
         }
-        return modified;
     }
 }
@@ -114,14 +114,14 @@ public class RestActions {
         return queryBuilder;
     }

-    public static SearchSourceBuilder getRestSearchSource(BytesReference sourceBytes, IndicesQueriesRegistry queryRegistry,
+    public static void parseRestSearchSource(SearchSourceBuilder source, BytesReference sourceBytes, IndicesQueriesRegistry queryRegistry,
             ParseFieldMatcher parseFieldMatcher, AggregatorParsers aggParsers)
             throws IOException {
         XContentParser parser = XContentFactory.xContent(sourceBytes).createParser(sourceBytes);
         QueryParseContext queryParseContext = new QueryParseContext(queryRegistry);
         queryParseContext.reset(parser);
         queryParseContext.parseFieldMatcher(parseFieldMatcher);
-        return SearchSourceBuilder.parseSearchSource(parser, queryParseContext, aggParsers);
+        source.parseXContent(parser, queryParseContext, aggParsers);
     }

     /**
@@ -30,7 +30,7 @@ import org.elasticsearch.rest.RestStatus;
  * A REST based action listener that assumes the response is of type {@link ToXContent} and automatically
  * builds an XContent based response (wrapping the toXContent in startObject/endObject).
  */
-public final class RestToXContentListener<Response extends ToXContent> extends RestResponseListener<Response> {
+public class RestToXContentListener<Response extends ToXContent> extends RestResponseListener<Response> {

     public RestToXContentListener(RestChannel channel) {
         super(channel);
@@ -45,6 +45,10 @@ public final class RestToXContentListener<Response extends ToXContent> extends R
         builder.startObject();
         response.toXContent(builder, channel.request());
         builder.endObject();
-        return new BytesRestResponse(RestStatus.OK, builder);
+        return new BytesRestResponse(getStatus(response), builder);
     }
+
+    protected RestStatus getStatus(Response response) {
+        return RestStatus.OK;
+    }
 }
@@ -734,9 +734,22 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
         return ext;
     }

     /**
      * Create a new SearchSourceBuilder with attributes set by an xContent.
      */
     public SearchSourceBuilder fromXContent(XContentParser parser, QueryParseContext context, AggregatorParsers aggParsers)
             throws IOException {
         SearchSourceBuilder builder = new SearchSourceBuilder();
+        builder.parseXContent(parser, context, aggParsers);
+        return builder;
+    }
+
+    /**
+     * Parse some xContent into this SearchSourceBuilder, overwriting any values specified in the xContent. Use this if you need to set up
+     * different defaults than a regular SearchSourceBuilder would have and use
+     * {@link #fromXContent(XContentParser, QueryParseContext, AggregatorParsers)} if you have normal defaults.
+     */
+    public void parseXContent(XContentParser parser, QueryParseContext context, AggregatorParsers aggParsers) throws IOException {
         XContentParser.Token token = parser.currentToken();
         String currentFieldName = null;
         if (token != XContentParser.Token.START_OBJECT && (token = parser.nextToken()) != XContentParser.Token.START_OBJECT) {
@@ -748,44 +761,42 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
                 currentFieldName = parser.currentName();
             } else if (token.isValue()) {
                 if (context.parseFieldMatcher().match(currentFieldName, FROM_FIELD)) {
-                    builder.from = parser.intValue();
+                    from = parser.intValue();
                 } else if (context.parseFieldMatcher().match(currentFieldName, SIZE_FIELD)) {
-                    builder.size = parser.intValue();
+                    size = parser.intValue();
                 } else if (context.parseFieldMatcher().match(currentFieldName, TIMEOUT_FIELD)) {
-                    builder.timeoutInMillis = parser.longValue();
+                    timeoutInMillis = parser.longValue();
                 } else if (context.parseFieldMatcher().match(currentFieldName, TERMINATE_AFTER_FIELD)) {
-                    builder.terminateAfter = parser.intValue();
+                    terminateAfter = parser.intValue();
                 } else if (context.parseFieldMatcher().match(currentFieldName, MIN_SCORE_FIELD)) {
-                    builder.minScore = parser.floatValue();
+                    minScore = parser.floatValue();
                 } else if (context.parseFieldMatcher().match(currentFieldName, VERSION_FIELD)) {
-                    builder.version = parser.booleanValue();
+                    version = parser.booleanValue();
                 } else if (context.parseFieldMatcher().match(currentFieldName, EXPLAIN_FIELD)) {
-                    builder.explain = parser.booleanValue();
+                    explain = parser.booleanValue();
                 } else if (context.parseFieldMatcher().match(currentFieldName, TRACK_SCORES_FIELD)) {
-                    builder.trackScores = parser.booleanValue();
+                    trackScores = parser.booleanValue();
                 } else if (context.parseFieldMatcher().match(currentFieldName, _SOURCE_FIELD)) {
-                    builder.fetchSourceContext = FetchSourceContext.parse(parser, context);
+                    fetchSourceContext = FetchSourceContext.parse(parser, context);
                 } else if (context.parseFieldMatcher().match(currentFieldName, FIELDS_FIELD)) {
                     List<String> fieldNames = new ArrayList<>();
                     fieldNames.add(parser.text());
                     builder.fieldNames = fieldNames;
                 } else if (context.parseFieldMatcher().match(currentFieldName, SORT_FIELD)) {
-                    builder.sort(parser.text());
+                    sort(parser.text());
                 } else if (context.parseFieldMatcher().match(currentFieldName, PROFILE_FIELD)) {
-                    builder.profile = parser.booleanValue();
+                    profile = parser.booleanValue();
                 } else {
                     throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].",
                             parser.getTokenLocation());
                 }
             } else if (token == XContentParser.Token.START_OBJECT) {
                 if (context.parseFieldMatcher().match(currentFieldName, QUERY_FIELD)) {
-                    builder.queryBuilder = context.parseInnerQueryBuilder();
+                    queryBuilder = context.parseInnerQueryBuilder();
                 } else if (context.parseFieldMatcher().match(currentFieldName, POST_FILTER_FIELD)) {
-                    builder.postQueryBuilder = context.parseInnerQueryBuilder();
+                    postQueryBuilder = context.parseInnerQueryBuilder();
                 } else if (context.parseFieldMatcher().match(currentFieldName, _SOURCE_FIELD)) {
-                    builder.fetchSourceContext = FetchSourceContext.parse(parser, context);
+                    fetchSourceContext = FetchSourceContext.parse(parser, context);
                 } else if (context.parseFieldMatcher().match(currentFieldName, SCRIPT_FIELDS_FIELD)) {
-                    List<ScriptField> scriptFields = new ArrayList<>();
+                    scriptFields = new ArrayList<>();
                     while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                         String scriptFieldName = parser.currentName();
                         token = parser.nextToken();
@@ -822,9 +833,8 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
                                     + currentFieldName + "] but found [" + token + "]", parser.getTokenLocation());
                         }
                     }
-                    builder.scriptFields = scriptFields;
                 } else if (context.parseFieldMatcher().match(currentFieldName, INDICES_BOOST_FIELD)) {
-                    ObjectFloatHashMap<String> indexBoost = new ObjectFloatHashMap<String>();
+                    indexBoost = new ObjectFloatHashMap<String>();
                     while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                         if (token == XContentParser.Token.FIELD_NAME) {
                             currentFieldName = parser.currentName();
@@ -835,25 +845,23 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
                                     parser.getTokenLocation());
                         }
                     }
-                    builder.indexBoost = indexBoost;
                 } else if (context.parseFieldMatcher().match(currentFieldName, AGGREGATIONS_FIELD)) {
-                    builder.aggregations = aggParsers.parseAggregators(parser, context);
+                    aggregations = aggParsers.parseAggregators(parser, context);
                 } else if (context.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_FIELD)) {
-                    builder.highlightBuilder = HighlightBuilder.PROTOTYPE.fromXContent(context);
+                    highlightBuilder = HighlightBuilder.PROTOTYPE.fromXContent(context);
                 } else if (context.parseFieldMatcher().match(currentFieldName, INNER_HITS_FIELD)) {
                     XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
-                    builder.innerHitsBuilder = xContentBuilder.bytes();
+                    innerHitsBuilder = xContentBuilder.bytes();
                 } else if (context.parseFieldMatcher().match(currentFieldName, SUGGEST_FIELD)) {
                     XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
-                    builder.suggestBuilder = xContentBuilder.bytes();
+                    suggestBuilder = xContentBuilder.bytes();
                 } else if (context.parseFieldMatcher().match(currentFieldName, SORT_FIELD)) {
-                    List<BytesReference> sorts = new ArrayList<>();
+                    sorts = new ArrayList<>();
                     XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
                     sorts.add(xContentBuilder.bytes());
-                    builder.sorts = sorts;
                 } else if (context.parseFieldMatcher().match(currentFieldName, EXT_FIELD)) {
                     XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
-                    builder.ext = xContentBuilder.bytes();
+                    ext = xContentBuilder.bytes();
                 } else {
                     throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].",
                             parser.getTokenLocation());
@@ -861,7 +869,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
             } else if (token == XContentParser.Token.START_ARRAY) {

                 if (context.parseFieldMatcher().match(currentFieldName, FIELDS_FIELD)) {
-                    List<String> fieldNames = new ArrayList<>();
+                    fieldNames = new ArrayList<>();
                     while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                         if (token == XContentParser.Token.VALUE_STRING) {
                             fieldNames.add(parser.text());
@@ -870,9 +878,8 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
                                     + currentFieldName + "] but found [" + token + "]", parser.getTokenLocation());
                         }
                     }
-                    builder.fieldNames = fieldNames;
                 } else if (context.parseFieldMatcher().match(currentFieldName, FIELDDATA_FIELDS_FIELD)) {
-                    List<String> fieldDataFields = new ArrayList<>();
+                    fieldDataFields = new ArrayList<>();
                     while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                         if (token == XContentParser.Token.VALUE_STRING) {
                             fieldDataFields.add(parser.text());
@@ -881,22 +888,19 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
                                     + currentFieldName + "] but found [" + token + "]", parser.getTokenLocation());
                         }
                     }
-                    builder.fieldDataFields = fieldDataFields;
                 } else if (context.parseFieldMatcher().match(currentFieldName, SORT_FIELD)) {
-                    List<BytesReference> sorts = new ArrayList<>();
+                    sorts = new ArrayList<>();
                     while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                         XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
                         sorts.add(xContentBuilder.bytes());
                     }
-                    builder.sorts = sorts;
                 } else if (context.parseFieldMatcher().match(currentFieldName, RESCORE_FIELD)) {
-                    List<RescoreBuilder<?>> rescoreBuilders = new ArrayList<>();
+                    rescoreBuilders = new ArrayList<>();
                     while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                         rescoreBuilders.add(RescoreBuilder.parseFromXContent(context));
                     }
-                    builder.rescoreBuilders = rescoreBuilders;
                 } else if (context.parseFieldMatcher().match(currentFieldName, STATS_FIELD)) {
-                    List<String> stats = new ArrayList<>();
+                    stats = new ArrayList<>();
                     while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                         if (token == XContentParser.Token.VALUE_STRING) {
                             stats.add(parser.text());
@@ -905,11 +909,10 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
                                     + currentFieldName + "] but found [" + token + "]", parser.getTokenLocation());
                         }
                     }
-                    builder.stats = stats;
                 } else if (context.parseFieldMatcher().match(currentFieldName, _SOURCE_FIELD)) {
-                    builder.fetchSourceContext = FetchSourceContext.parse(parser, context);
+                    fetchSourceContext = FetchSourceContext.parse(parser, context);
                 } else if (context.parseFieldMatcher().match(currentFieldName, SEARCH_AFTER)) {
-                    builder.searchAfterBuilder = SearchAfterBuilder.PROTOTYPE.fromXContent(parser, context.parseFieldMatcher());
+                    searchAfterBuilder = SearchAfterBuilder.PROTOTYPE.fromXContent(parser, context.parseFieldMatcher());
                 } else {
                     throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].",
                             parser.getTokenLocation());
@@ -919,7 +922,6 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
                     parser.getTokenLocation());
             }
         }
-        return builder;
     }

     @Override
@@ -19,6 +19,8 @@

 package org.elasticsearch.tasks;

+import org.elasticsearch.common.Nullable;
+
 import java.util.concurrent.atomic.AtomicReference;

 /**
@@ -56,4 +58,11 @@ public class CancellableTask extends Task {
         return reason.get() != null;
     }
+
+    /**
+     * The reason the task was cancelled or null if it hasn't been cancelled.
+     */
+    @Nullable
+    public String getReasonCancelled() {
+        return reason.get();
+    }
 }
@@ -0,0 +1,54 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.tasks;

import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;

/**
 * A TaskListener that just logs the response at the info level. Used when we
 * need a listener but aren't returning the result to the user.
 */
public final class LoggingTaskListener<Response> implements TaskListener<Response> {
    private final static ESLogger logger = Loggers.getLogger(LoggingTaskListener.class);

    /**
     * Get the instance of LoggingTaskListener cast appropriately.
     */
    @SuppressWarnings("unchecked") // Safe because we only toString the response
    public static <Response> TaskListener<Response> instance() {
        return (TaskListener<Response>) INSTANCE;
    }

    private static final LoggingTaskListener<Object> INSTANCE = new LoggingTaskListener<Object>();

    private LoggingTaskListener() {
    }

    @Override
    public void onResponse(Task task, Response response) {
        logger.info("{} finished with response {}", task.getId(), response);
    }

    @Override
    public void onFailure(Task task, Throwable e) {
        logger.warn("{} failed with exception", e, task.getId());
    }
}
@@ -0,0 +1,49 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.tasks;

/**
 * Listener for Task success or failure.
 */
public interface TaskListener<Response> {
    /**
     * Handle task response. This response may constitute a failure or a success
     * but it is up to the listener to make that decision.
     *
     * @param task
     *            the task being executed. May be null if the action doesn't
     *            create a task
     * @param response
     *            the response from the action that executed the task
     */
    void onResponse(Task task, Response response);

    /**
     * A failure caused by an exception at some phase of the task.
     *
     * @param task
     *            the task being executed. May be null if the action doesn't
     *            create a task
     * @param e
     *            the failure
     */
    void onFailure(Task task, Throwable e);
}
@@ -22,8 +22,8 @@ package org.elasticsearch.action.admin.indices.create;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
-import org.elasticsearch.rest.NoOpClient;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.client.NoOpClient;
 import org.junit.After;
 import org.junit.Before;
@@ -0,0 +1,36 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.bulk;

import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.test.ESTestCase;

import static org.apache.lucene.util.TestUtil.randomSimpleString;

public class BulkShardRequestTests extends ESTestCase {
    public void testToString() {
        String index = randomSimpleString(getRandom(), 10);
        int count = between(1, 100);
        BulkShardRequest r = new BulkShardRequest(null, new ShardId(index, "ignored", 0), false, new BulkItemRequest[count]);
        assertEquals("BulkShardRequest to [" + index + "] containing [" + count + "] requests", r.toString());
        r = new BulkShardRequest(null, new ShardId(index, "ignored", 0), true, new BulkItemRequest[count]);
        assertEquals("BulkShardRequest to [" + index + "] containing [" + count + "] requests and a refresh", r.toString());
    }
}
@@ -25,8 +25,8 @@ import org.elasticsearch.action.support.PlainActionFuture;
 import org.elasticsearch.action.update.UpdateRequest;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
-import org.elasticsearch.rest.NoOpClient;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.client.NoOpClient;
 import org.junit.After;
 import org.junit.Before;
@@ -23,8 +23,8 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentType;
-import org.elasticsearch.rest.NoOpClient;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.client.NoOpClient;
 import org.junit.After;
 import org.junit.Before;
@@ -0,0 +1,461 @@
[[docs-reindex]]
==== Reindex API

`_reindex`'s most basic form just copies documents from one index to another.
This will copy documents from `twitter` into `new_twitter`:

[source,js]
--------------------------------------------------
POST /_reindex
{
  "source": {
    "index": "twitter"
  },
  "dest": {
    "index": "new_twitter"
  }
}
--------------------------------------------------
// AUTOSENSE

That will return something like this:

[source,js]
--------------------------------------------------
{
  "took" : 639,
  "updated": 112,
  "batches": 130,
  "version_conflicts": 0,
  "failures" : [ ],
  "created": 12344
}
--------------------------------------------------

Just like `_update_by_query`, `_reindex` gets a snapshot of the source index
but its target must be a **different** index so version conflicts are unlikely.
The `dest` element can be configured like the index API to control optimistic
concurrency control. Just leaving out `version_type` (as above) or setting it
to `internal` will cause Elasticsearch to blindly dump documents into the
target, overwriting any that happen to have the same type and id:

[source,js]
--------------------------------------------------
POST /_reindex
{
  "source": {
    "index": "twitter"
  },
  "dest": {
    "index": "new_twitter",
    "version_type": "internal"
  }
}
--------------------------------------------------
// AUTOSENSE

Setting `version_type` to `external` will cause Elasticsearch to preserve the
`version` from the source, create any documents that are missing, and update
any documents that have an older version in the destination index than they do
in the source index:

[source,js]
--------------------------------------------------
POST /_reindex
{
  "source": {
    "index": "twitter"
  },
  "dest": {
    "index": "new_twitter",
    "version_type": "external"
  }
}
--------------------------------------------------
// AUTOSENSE
Setting `op_type` to `create` will cause `_reindex` to only create missing
documents in the target index. All existing documents will cause a version
conflict:

[source,js]
--------------------------------------------------
POST /_reindex
{
  "source": {
    "index": "twitter"
  },
  "dest": {
    "index": "new_twitter",
    "op_type": "create"
  }
}
--------------------------------------------------
// AUTOSENSE

By default version conflicts abort the `_reindex` process but you can just
count them by setting `"conflicts": "proceed"` in the request body:

[source,js]
--------------------------------------------------
POST /_reindex
{
  "conflicts": "proceed",
  "source": {
    "index": "twitter"
  },
  "dest": {
    "index": "new_twitter",
    "op_type": "create"
  }
}
--------------------------------------------------
// AUTOSENSE
You can limit the documents by adding a type to the `source` or by adding a
query. This will only copy `tweet`s made by `kimchy` into `new_twitter`:

[source,js]
--------------------------------------------------
POST /_reindex
{
  "source": {
    "index": "twitter",
    "type": "tweet",
    "query": {
      "term": {
        "user": "kimchy"
      }
    }
  },
  "dest": {
    "index": "new_twitter"
  }
}
--------------------------------------------------
// AUTOSENSE
`index` and `type` in `source` can both be lists, allowing you to copy from
lots of sources in one request. This will copy documents from the `tweet` and
`post` types in the `twitter` and `blog` indices. It'd include the `post` type
in the `twitter` index and the `tweet` type in the `blog` index. If you want to
be more specific you'll need to use the `query`. It also makes no effort to
handle id collisions. The target index will remain valid but it's not easy to
predict which document will survive because the iteration order isn't well
defined. Just avoid that situation, ok?

[source,js]
--------------------------------------------------
POST /_reindex
{
  "source": {
    "index": ["twitter", "blog"],
    "type": ["tweet", "post"]
  },
  "dest": {
    "index": "all_together"
  }
}
--------------------------------------------------
// AUTOSENSE
It's also possible to limit the number of processed documents by setting
`size`. This will only copy a single document from `twitter` to
`new_twitter`:

[source,js]
--------------------------------------------------
POST /_reindex
{
  "size": 1,
  "source": {
    "index": "twitter"
  },
  "dest": {
    "index": "new_twitter"
  }
}
--------------------------------------------------
// AUTOSENSE

If you want a particular set of documents from the twitter index you'll
need to sort. Sorting makes the scroll less efficient but in some contexts
it's worth it. If possible, prefer a more selective query to `size` and `sort`.
This will copy 10000 documents from `twitter` into `new_twitter`:

[source,js]
--------------------------------------------------
POST /_reindex
{
  "size": 10000,
  "source": {
    "index": "twitter",
    "sort": { "date": "desc" }
  },
  "dest": {
    "index": "new_twitter"
  }
}
--------------------------------------------------
// AUTOSENSE
Like `_update_by_query`, `_reindex` supports a script that modifies the
document. Unlike `_update_by_query`, the script is allowed to modify the
document's metadata. This example bumps the version of the source document:

[source,js]
--------------------------------------------------
POST /_reindex
{
  "source": {
    "index": "twitter"
  },
  "dest": {
    "index": "new_twitter",
    "version_type": "external"
  },
  "script": {
    "inline": "if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}"
  }
}
--------------------------------------------------
// AUTOSENSE
Think of the possibilities! Just be careful! With great power.... You can
change:

* `_id`
* `_type`
* `_index`
* `_version`
* `_routing`
* `_parent`
* `_timestamp`
* `_ttl`

Setting `_version` to `null` or clearing it from the `ctx` map is just like not
sending the version in an indexing request. It will cause that document to be
overwritten in the target index regardless of the version on the target or the
version type you use in the `_reindex` request.
By default if `_reindex` sees a document with routing then the routing is
preserved unless it's changed by the script. You can set `routing` on the
`dest` request to change this:

`keep`::

Sets the routing on the bulk request sent for each match to the routing on
the match. The default.

`discard`::

Sets the routing on the bulk request sent for each match to null.

`=<some text>`::

Sets the routing on the bulk request sent for each match to all text after
the `=`.
For example, you can use the following request to copy all documents from
the `source` index with the company name `cat` into the `dest` index with
routing set to `cat`:

[source,js]
--------------------------------------------------
POST /_reindex
{
  "source": {
    "index": "source",
    "query": {
      "match": {
        "company": "cat"
      }
    }
  },
  "dest": {
    "index": "dest",
    "routing": "=cat"
  }
}
--------------------------------------------------
// AUTOSENSE
[float]
=== URL Parameters

In addition to the standard parameters like `pretty`, the Reindex API also
supports `refresh`, `wait_for_completion`, `consistency`, and `timeout`.

Sending the `refresh` URL parameter will cause all indexes to which the request
wrote to be refreshed. This is different from the Index API's `refresh`
parameter, which causes just the shard that received the new data to be
refreshed.

If the request contains `wait_for_completion=false` then Elasticsearch will
perform some preflight checks, launch the request, and then return a `task`
which can be used with <<docs-reindex-task-api,Tasks APIs>> to cancel or get
the status of the task. For now, once the request is finished the task is gone
and the only place to look for the ultimate result of the task is in the
Elasticsearch log file. This will be fixed soon.
`consistency` controls how many copies of a shard must respond to each write
request. `timeout` controls how long each write request waits for unavailable
shards to become available. Both work exactly how they work in the
{ref}/docs-bulk.html[Bulk API].
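
For example, the following sketch combines all four parameters (the values here
are illustrative assumptions, not recommendations): it refreshes the affected
indexes, returns a task immediately, requires a quorum of shard copies for each
write, and waits up to two minutes for unavailable shards:

[source,js]
--------------------------------------------------
POST /_reindex?refresh&wait_for_completion=false&consistency=quorum&timeout=2m
{
  "source": {
    "index": "twitter"
  },
  "dest": {
    "index": "new_twitter"
  }
}
--------------------------------------------------
// AUTOSENSE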

[float]
=== Response body

The JSON response looks like this:

[source,js]
--------------------------------------------------
{
  "took" : 639,
  "updated": 0,
  "created": 123,
  "batches": 1,
  "version_conflicts": 2,
  "failures" : [ ]
}
--------------------------------------------------

`took`::

The number of milliseconds from start to end of the whole operation.

`updated`::

The number of documents that were successfully updated.

`created`::

The number of documents that were successfully created.

`batches`::

The number of scroll responses pulled back by the reindex.

`version_conflicts`::

The number of version conflicts that reindex hit.

`failures`::

Array of all indexing failures. If this is non-empty then the request aborted
because of those failures. See `conflicts` for how to prevent version conflicts
from aborting the operation.
[float]
[[docs-reindex-task-api]]
=== Works with the Task API

While reindex is running you can fetch its status using the
{ref}/task/list.html[Task List APIs]:

[source,js]
--------------------------------------------------
POST /_tasks/?pretty&detailed=true&actions=*reindex
--------------------------------------------------
// AUTOSENSE

The response looks like:

[source,js]
--------------------------------------------------
{
  "nodes" : {
    "r1A2WoRbTwKZ516z6NEs5A" : {
      "name" : "Tyrannus",
      "transport_address" : "127.0.0.1:9300",
      "host" : "127.0.0.1",
      "ip" : "127.0.0.1:9300",
      "attributes" : {
        "testattr" : "test",
        "portsfile" : "true"
      },
      "tasks" : [ {
        "node" : "r1A2WoRbTwKZ516z6NEs5A",
        "id" : 36619,
        "type" : "transport",
        "action" : "indices:data/write/reindex",
        "status" : { <1>
          "total" : 6154,
          "updated" : 3500,
          "created" : 0,
          "deleted" : 0,
          "batches" : 36,
          "version_conflicts" : 0,
          "noops" : 0
        },
        "description" : ""
      } ]
    }
  }
}
--------------------------------------------------

<1> This object contains the actual status. It is just like the response JSON
with the important addition of the `total` field. `total` is the total number
of operations that the reindex expects to perform. You can estimate the
progress by adding the `updated`, `created`, and `deleted` fields. The request
will finish when their sum is equal to the `total` field.
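For the status above that works out to (3500 + 0 + 0) / 6154, so the reindex is
roughly 57% done.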

[float]
=== Examples

==== Change the name of a field

`_reindex` can be used to build a copy of an index with renamed fields. Say you
create an index containing documents that look like this:

[source,js]
--------------------------------------------------
POST test/test/1?refresh&pretty
{
  "text": "words words",
  "flag": "foo"
}
--------------------------------------------------
// AUTOSENSE

But you don't like the name `flag` and want to replace it with `tag`.
`_reindex` can create the other index for you:

[source,js]
--------------------------------------------------
POST _reindex?pretty
{
  "source": {
    "index": "test"
  },
  "dest": {
    "index": "test2"
  },
  "script": {
    "inline": "ctx._source.tag = ctx._source.remove(\"flag\")"
  }
}
--------------------------------------------------
// AUTOSENSE

Now you can get the new document:

[source,js]
--------------------------------------------------
GET test2/test/1?pretty
--------------------------------------------------
// AUTOSENSE

and it'll look like:

[source,js]
--------------------------------------------------
{
  "text": "words words",
  "tag": "foo"
}
--------------------------------------------------

Or you can search by `tag` or whatever you want.
@@ -0,0 +1,358 @@
[[docs-update-by-query]]
==== Update By Query API

The simplest usage of `_update_by_query` just performs an update on every
document in the index without changing the source. This is useful to
<<picking-up-a-new-property,pick up a new property>> or some other online
mapping change. Here is the API:

[source,js]
--------------------------------------------------
POST /twitter/_update_by_query?conflicts=proceed
--------------------------------------------------
// AUTOSENSE

That will return something like this:

[source,js]
--------------------------------------------------
{
  "took" : 639,
  "updated": 1235,
  "batches": 13,
  "version_conflicts": 2,
  "failures" : [ ]
}
--------------------------------------------------

`_update_by_query` gets a snapshot of the index when it starts and indexes what
it finds using `internal` versioning. That means that you'll get a version
conflict if the document changes between the time when the snapshot was taken
and when the index request is processed. When the versions match the document
is updated and the version number is incremented.
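For example, if a document is on version 5 when the snapshot is taken and
another client updates it to version 6 before `_update_by_query` reaches it,
that update request is rejected as a version conflict.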

All update and query failures cause the `_update_by_query` to abort and are
returned in the `failures` of the response. The updates that have been
performed still stick. In other words, the process is not rolled back, only
aborted. While the first failure causes the abort, all failures that are
returned by the failing bulk request are returned in the `failures` element, so
it's possible for there to be quite a few.

If you want to simply count version conflicts, and not cause the
`_update_by_query` to abort, you can set `conflicts=proceed` on the URL or
`"conflicts": "proceed"` in the request body. The first example does this
because it is just trying to pick up an online mapping change and a version
conflict simply means that the conflicting document was updated between the
start of the `_update_by_query` and the time when it attempted to update the
document. This is fine because that update will have picked up the online
mapping update.
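
The request body form of the flag would look something like this (a sketch that
mirrors the URL parameter form above):

[source,js]
--------------------------------------------------
POST /twitter/_update_by_query
{
  "conflicts": "proceed"
}
--------------------------------------------------
// AUTOSENSE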
|
||||
|
||||
Back to the API format, you can limit `_update_by_query` to a single type. This
will only update `tweet`s from the `twitter` index:

[source,js]
--------------------------------------------------
POST /twitter/tweet/_update_by_query?conflicts=proceed
--------------------------------------------------
// AUTOSENSE

You can also limit `_update_by_query` using the
{ref}/query-dsl.html[Query DSL]. This will update all documents from the
`twitter` index for the user `kimchy`:

[source,js]
--------------------------------------------------
POST /twitter/_update_by_query?conflicts=proceed
{
  "query": { <1>
    "term": {
      "user": "kimchy"
    }
  }
}
--------------------------------------------------
// AUTOSENSE

<1> The query must be passed as a value to the `query` key, in the same
way as the {ref}/search-search.html[Search API]. You can also use the `q`
parameter in the same way as the search API.
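For instance, the Query DSL request above can be collapsed onto the URL with
`q` (a sketch assuming the search API's Lucene query string syntax):

[source,js]
--------------------------------------------------
POST /twitter/_update_by_query?q=user:kimchy&conflicts=proceed
--------------------------------------------------
// AUTOSENSE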
So far we've only been updating documents without changing their source. That
is genuinely useful for things like
<<picking-up-a-new-property,picking up new properties>> but it's only half the
fun. `_update_by_query` supports a `script` object to update the document. This
will increment the `likes` field on all of kimchy's tweets:

[source,js]
--------------------------------------------------
POST /twitter/_update_by_query
{
  "script": {
    "inline": "ctx._source.likes++"
  },
  "query": {
    "term": {
      "user": "kimchy"
    }
  }
}
--------------------------------------------------
// AUTOSENSE

Just as in the {ref}/docs-update.html[Update API] you can set `ctx.op = "noop"`
if your script decides that it doesn't have to make any changes. That will
cause `_update_by_query` to omit that document from its updates. Setting
`ctx.op` to anything else is an error. If you want to delete by a query you can
use the <<plugins-delete-by-query,Delete by Query Plugin>> instead. Setting any
other field in `ctx` is an error.
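As a sketch, assuming the default inline scripting language allows a
conditional (the `likes < 0` check is purely illustrative), a script that noops
some documents might look like:

[source,js]
--------------------------------------------------
POST /twitter/_update_by_query
{
  "script": {
    "inline": "if (ctx._source.likes < 0) { ctx.op = 'noop' } else { ctx._source.likes++ }"
  }
}
--------------------------------------------------
// AUTOSENSE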
Note that we stopped specifying `conflicts=proceed`. In this case we want a
version conflict to abort the process so we can handle the failure.

This API doesn't allow you to move the documents it touches, just modify their
source. This is intentional! We've made no provisions for removing the document
from its original location.

It's also possible to do this whole thing on multiple indexes and multiple
types at once, just like the search API:

[source,js]
--------------------------------------------------
POST /twitter,blog/tweet,post/_update_by_query
--------------------------------------------------
// AUTOSENSE

If you provide `routing` then the routing is copied to the scroll query,
limiting the process to the shards that match that routing value:

[source,js]
--------------------------------------------------
POST /twitter/_update_by_query?routing=1
--------------------------------------------------
// AUTOSENSE

By default `_update_by_query` uses scroll batches of 100. You can change the
batch size with the `scroll_size` URL parameter:

[source,js]
--------------------------------------------------
POST /twitter/_update_by_query?scroll_size=1000
--------------------------------------------------
// AUTOSENSE

[float]
=== URL Parameters

In addition to the standard parameters like `pretty`, the Update By Query API
also supports `refresh`, `wait_for_completion`, `consistency`, and `timeout`.

Sending the `refresh` parameter will update all shards in the index being
updated when the request completes. This is different from the Index API's
`refresh` parameter, which causes just the shard that received the new data to
be refreshed.

If the request contains `wait_for_completion=false` then Elasticsearch will
perform some preflight checks, launch the request, and then return a `task`
which can be used with <<docs-update-by-query-task-api,Tasks APIs>> to cancel
or get the status of the task. For now, once the request is finished the task
is gone and the only place to look for the ultimate result of the task is in
the Elasticsearch log file. This will be fixed soon.
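For example, launching the request in the background looks like this:

[source,js]
--------------------------------------------------
POST /twitter/_update_by_query?wait_for_completion=false
--------------------------------------------------
// AUTOSENSE

and the preflight response carries the task to poll or cancel (the node and
task IDs below are illustrative):

[source,js]
--------------------------------------------------
{
  "task" : "r1A2WoRbTwKZ516z6NEs5A:36619"
}
--------------------------------------------------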
`consistency` controls how many copies of a shard must respond to each write
request. `timeout` controls how long each write request waits for unavailable
shards to become available. Both work exactly how they work in the
{ref}/docs-bulk.html[Bulk API].
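Both are passed on the URL; a minimal sketch (the values are illustrative, not
recommendations):

[source,js]
--------------------------------------------------
POST /twitter/_update_by_query?consistency=one&timeout=2m
--------------------------------------------------
// AUTOSENSE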
[float]
=== Response body

The JSON response looks like this:

[source,js]
--------------------------------------------------
{
  "took" : 639,
  "updated": 0,
  "batches": 1,
  "version_conflicts": 2,
  "failures" : [ ]
}
--------------------------------------------------

`took`::

The number of milliseconds from start to end of the whole operation.

`updated`::

The number of documents that were successfully updated.

`batches`::

The number of scroll responses pulled back by the update by query.

`version_conflicts`::

The number of version conflicts that the update by query hit.

`failures`::

Array of all indexing failures. If this is non-empty then the request aborted
because of those failures. See `conflicts` for how to prevent version conflicts
from aborting the operation.


[float]
[[docs-update-by-query-task-api]]
=== Works with the Task API

While Update By Query is running, you can fetch its status using the
{ref}/task/list.html[Task List APIs]:

[source,js]
--------------------------------------------------
POST /_tasks/?pretty&detailed=true&action=byquery
--------------------------------------------------
// AUTOSENSE

The response looks like:

[source,js]
--------------------------------------------------
{
  "nodes" : {
    "r1A2WoRbTwKZ516z6NEs5A" : {
      "name" : "Tyrannus",
      "transport_address" : "127.0.0.1:9300",
      "host" : "127.0.0.1",
      "ip" : "127.0.0.1:9300",
      "attributes" : {
        "testattr" : "test",
        "portsfile" : "true"
      },
      "tasks" : [ {
        "node" : "r1A2WoRbTwKZ516z6NEs5A",
        "id" : 36619,
        "type" : "transport",
        "action" : "indices:data/write/update/byquery",
        "status" : { <1>
          "total" : 6154,
          "updated" : 3500,
          "created" : 0,
          "deleted" : 0,
          "batches" : 36,
          "version_conflicts" : 0,
          "noops" : 0
        },
        "description" : ""
      } ]
    }
  }
}
--------------------------------------------------

<1> This object contains the actual status. It is just like the response JSON
with the important addition of the `total` field. `total` is the total number
of operations that the update by query expects to perform. You can estimate the
progress by adding the `updated`, `created`, and `deleted` fields. The request
will finish when their sum is equal to the `total` field.

[float]
=== Examples

[[picking-up-a-new-property]]
==== Pick up a new property

Say you created an index without dynamic mapping, filled it with data, and then
added a mapping value to pick up more fields from the data:

[source,js]
--------------------------------------------------
PUT test
{
  "mappings": {
    "test": {
      "dynamic": false, <1>
      "properties": {
        "text": {"type": "string"}
      }
    }
  }
}

POST test/test?refresh
{
  "text": "words words",
  "flag": "bar"
}
POST test/test?refresh
{
  "text": "words words",
  "flag": "foo"
}
PUT test/_mapping/test <2>
{
  "properties": {
    "text": {"type": "string"},
    "flag": {"type": "string", "analyzer": "keyword"}
  }
}
--------------------------------------------------
// AUTOSENSE

<1> This means that new fields won't be indexed, just stored in `_source`.

<2> This updates the mapping to add the new `flag` field. To pick up the new
field you have to reindex all documents with it.

Searching for the data won't find anything:

[source,js]
--------------------------------------------------
POST test/_search?filter_path=hits.total
{
  "query": {
    "match": {
      "flag": "foo"
    }
  }
}
--------------------------------------------------
// AUTOSENSE

[source,js]
--------------------------------------------------
{
  "hits" : {
    "total" : 0
  }
}
--------------------------------------------------

But you can issue an `_update_by_query` request to pick up the new mapping:

[source,js]
--------------------------------------------------
POST test/_update_by_query?refresh&conflicts=proceed
POST test/_search?filter_path=hits.total
{
  "query": {
    "match": {
      "flag": "foo"
    }
  }
}
--------------------------------------------------
// AUTOSENSE

[source,js]
--------------------------------------------------
{
  "hits" : {
    "total" : 1
  }
}
--------------------------------------------------

Hurray! You can do the exact same thing when adding a field to a multifield.

@ -0,0 +1,23 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

esplugin {
  description 'The Reindex module adds APIs to reindex from one index to another or update documents in place.'
  classname 'org.elasticsearch.index.reindex.ReindexPlugin'
}

@ -0,0 +1,411 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.bulk.Retry;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.ClearScrollResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;

import static java.lang.Math.max;
import static java.lang.Math.min;
import static java.util.Collections.emptyList;
import static java.util.Collections.unmodifiableList;
import static org.elasticsearch.action.bulk.BackoffPolicy.exponentialBackoff;
import static org.elasticsearch.common.unit.TimeValue.timeValueNanos;
import static org.elasticsearch.index.reindex.AbstractBulkByScrollRequest.SIZE_ALL_MATCHES;
import static org.elasticsearch.rest.RestStatus.CONFLICT;
import static org.elasticsearch.search.sort.SortBuilders.fieldSort;

/**
 * Abstract base for scrolling across a search and executing bulk actions on all
 * results.
 */
public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBulkByScrollRequest<Request>, Response> {
    /**
     * The request for this action. Named mainRequest because we create lots of <code>request</code> variables all representing child
     * requests of this mainRequest.
     */
    protected final Request mainRequest;
    protected final BulkByScrollTask task;

    private final AtomicLong startTime = new AtomicLong(-1);
    private final AtomicReference<String> scroll = new AtomicReference<>();
    private final Set<String> destinationIndices = Collections.newSetFromMap(new ConcurrentHashMap<>());

    private final ESLogger logger;
    private final Client client;
    private final ThreadPool threadPool;
    private final SearchRequest firstSearchRequest;
    private final ActionListener<Response> listener;
    private final Retry retry;

    public AbstractAsyncBulkByScrollAction(BulkByScrollTask task, ESLogger logger, Client client, ThreadPool threadPool,
            Request mainRequest, SearchRequest firstSearchRequest, ActionListener<Response> listener) {
        this.task = task;
        this.logger = logger;
        this.client = client;
        this.threadPool = threadPool;
        this.mainRequest = mainRequest;
        this.firstSearchRequest = firstSearchRequest;
        this.listener = listener;
        retry = Retry.on(EsRejectedExecutionException.class).policy(wrapBackoffPolicy(backoffPolicy()));
    }

    protected abstract BulkRequest buildBulk(Iterable<SearchHit> docs);

    protected abstract Response buildResponse(TimeValue took, List<Failure> indexingFailures, List<ShardSearchFailure> searchFailures);

    public void start() {
        initialSearch();
    }

    public BulkByScrollTask getTask() {
        return task;
    }

    void initialSearch() {
        if (task.isCancelled()) {
            finishHim(null);
            return;
        }
        try {
            // Default to sorting by _doc if it hasn't been changed.
            if (firstSearchRequest.source().sorts() == null) {
                firstSearchRequest.source().sort(fieldSort("_doc"));
            }
            startTime.set(System.nanoTime());
            if (logger.isDebugEnabled()) {
                logger.debug("executing initial scroll against {}{}",
                        firstSearchRequest.indices() == null || firstSearchRequest.indices().length == 0 ? "all indices"
                                : firstSearchRequest.indices(),
                        firstSearchRequest.types() == null || firstSearchRequest.types().length == 0 ? ""
                                : firstSearchRequest.types());
            }
            client.search(firstSearchRequest, new ActionListener<SearchResponse>() {
                @Override
                public void onResponse(SearchResponse response) {
                    logger.debug("[{}] documents match query", response.getHits().getTotalHits());
                    onScrollResponse(response);
                }

                @Override
                public void onFailure(Throwable e) {
                    finishHim(e);
                }
            });
        } catch (Throwable t) {
            finishHim(t);
        }
    }

    /**
     * Set the last returned scrollId. Package private for testing.
     */
    void setScroll(String scroll) {
        this.scroll.set(scroll);
    }

    void onScrollResponse(SearchResponse searchResponse) {
        if (task.isCancelled()) {
            finishHim(null);
            return;
        }
        setScroll(searchResponse.getScrollId());
        if (searchResponse.getShardFailures() != null && searchResponse.getShardFailures().length > 0) {
            startNormalTermination(emptyList(), unmodifiableList(Arrays.asList(searchResponse.getShardFailures())));
            return;
        }
        long total = searchResponse.getHits().totalHits();
        if (mainRequest.getSize() > 0) {
            total = min(total, mainRequest.getSize());
        }
        task.setTotal(total);
        threadPool.generic().execute(new AbstractRunnable() {
            @Override
            protected void doRun() throws Exception {
                SearchHit[] docs = searchResponse.getHits().getHits();
                logger.debug("scroll returned [{}] documents with a scroll id of [{}]", docs.length, searchResponse.getScrollId());
                if (docs.length == 0) {
                    startNormalTermination(emptyList(), emptyList());
                    return;
                }
                task.countBatch();
                List<SearchHit> docsIterable = Arrays.asList(docs);
                if (mainRequest.getSize() != SIZE_ALL_MATCHES) {
                    // Truncate the docs if we have more than the request size
                    long remaining = max(0, mainRequest.getSize() - task.getSuccessfullyProcessed());
                    if (remaining < docs.length) {
                        docsIterable = docsIterable.subList(0, (int) remaining);
                    }
                }
                BulkRequest request = buildBulk(docsIterable);
                if (request.requests().isEmpty()) {
                    /*
                     * If we noop-ed the entire batch then just skip to the next batch or the BulkRequest would fail validation.
                     */
                    startNextScroll();
                    return;
                }
                request.timeout(mainRequest.getTimeout());
                request.consistencyLevel(mainRequest.getConsistency());
                if (logger.isDebugEnabled()) {
                    logger.debug("sending [{}] entry, [{}] bulk request", request.requests().size(),
                            new ByteSizeValue(request.estimatedSizeInBytes()));
                }
                sendBulkRequest(request);
            }

            @Override
            public void onFailure(Throwable t) {
                finishHim(t);
            }
        });
    }

    void sendBulkRequest(BulkRequest request) {
        if (task.isCancelled()) {
            finishHim(null);
            return;
        }
        retry.withAsyncBackoff(client, request, new ActionListener<BulkResponse>() {
            @Override
            public void onResponse(BulkResponse response) {
                onBulkResponse(response);
            }

            @Override
            public void onFailure(Throwable e) {
                finishHim(e);
            }
        });
    }

    void onBulkResponse(BulkResponse response) {
        if (task.isCancelled()) {
            finishHim(null);
            return;
        }
        try {
            List<Failure> failures = new ArrayList<Failure>();
            Set<String> destinationIndicesThisBatch = new HashSet<>();
            for (BulkItemResponse item : response) {
                if (item.isFailed()) {
                    recordFailure(item.getFailure(), failures);
                    continue;
                }

                switch (item.getOpType()) {
                case "index":
                case "create":
                    IndexResponse ir = item.getResponse();
                    if (ir.isCreated()) {
                        task.countCreated();
                    } else {
                        task.countUpdated();
                    }
                    break;
                case "delete":
                    task.countDeleted();
                    break;
                default:
                    throw new IllegalArgumentException("Unknown op type: " + item.getOpType());
                }
                // Track the indexes we've seen so we can refresh them if requested
                destinationIndicesThisBatch.add(item.getIndex());
            }
            destinationIndices.addAll(destinationIndicesThisBatch);

            if (false == failures.isEmpty()) {
                startNormalTermination(unmodifiableList(failures), emptyList());
                return;
            }

            if (mainRequest.getSize() != SIZE_ALL_MATCHES && task.getSuccessfullyProcessed() >= mainRequest.getSize()) {
                // We've processed all the requested docs.
                startNormalTermination(emptyList(), emptyList());
                return;
            }
            startNextScroll();
        } catch (Throwable t) {
            finishHim(t);
        }
    }

    void startNextScroll() {
        if (task.isCancelled()) {
            finishHim(null);
            return;
        }
        SearchScrollRequest request = new SearchScrollRequest();
        request.scrollId(scroll.get()).scroll(firstSearchRequest.scroll());
        client.searchScroll(request, new ActionListener<SearchResponse>() {
            @Override
            public void onResponse(SearchResponse response) {
                onScrollResponse(response);
            }

            @Override
            public void onFailure(Throwable e) {
                finishHim(e);
            }
        });
    }

    private void recordFailure(Failure failure, List<Failure> failures) {
        if (failure.getStatus() == CONFLICT) {
            task.countVersionConflict();
            if (false == mainRequest.isAbortOnVersionConflict()) {
                return;
            }
        }
        failures.add(failure);
    }

    void startNormalTermination(List<Failure> indexingFailures, List<ShardSearchFailure> searchFailures) {
        if (false == mainRequest.isRefresh()) {
            finishHim(null, indexingFailures, searchFailures);
            return;
        }
        RefreshRequest refresh = new RefreshRequest();
        refresh.indices(destinationIndices.toArray(new String[destinationIndices.size()]));
        client.admin().indices().refresh(refresh, new ActionListener<RefreshResponse>() {
            @Override
            public void onResponse(RefreshResponse response) {
                finishHim(null, indexingFailures, searchFailures);
            }

            @Override
            public void onFailure(Throwable e) {
                finishHim(e);
            }
        });
    }

    /**
     * Finish the request.
     *
     * @param failure if non null then the request failed catastrophically with this exception
     */
    void finishHim(Throwable failure) {
        finishHim(failure, emptyList(), emptyList());
    }

    /**
     * Finish the request.
     *
     * @param failure if non null then the request failed catastrophically with this exception
     * @param indexingFailures any indexing failures accumulated during the request
     * @param searchFailures any search failures accumulated during the request
     */
    void finishHim(Throwable failure, List<Failure> indexingFailures, List<ShardSearchFailure> searchFailures) {
        String scrollId = scroll.get();
        if (Strings.hasLength(scrollId)) {
            /*
             * Fire off the clear scroll but don't wait for it to return before
             * we send the user their response.
             */
            ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
            clearScrollRequest.addScrollId(scrollId);
            client.clearScroll(clearScrollRequest, new ActionListener<ClearScrollResponse>() {
                @Override
                public void onResponse(ClearScrollResponse response) {
                    logger.debug("Freed [{}] contexts", response.getNumFreed());
                }

                @Override
                public void onFailure(Throwable e) {
                    logger.warn("Failed to clear scroll [" + scrollId + ']', e);
                }
            });
        }
        if (failure == null) {
            listener.onResponse(buildResponse(timeValueNanos(System.nanoTime() - startTime.get()), indexingFailures, searchFailures));
        } else {
            listener.onFailure(failure);
        }
    }

    /**
     * Build the backoff policy for use with retries. Package private for testing.
     */
    BackoffPolicy backoffPolicy() {
        return exponentialBackoff(mainRequest.getRetryBackoffInitialTime(), mainRequest.getMaxRetries());
    }

    /**
     * Wraps a backoffPolicy in another policy that counts the number of backoffs acquired.
     */
    private BackoffPolicy wrapBackoffPolicy(BackoffPolicy backoffPolicy) {
        return new BackoffPolicy() {
            @Override
            public Iterator<TimeValue> iterator() {
                return new Iterator<TimeValue>() {
                    private final Iterator<TimeValue> delegate = backoffPolicy.iterator();
                    @Override
                    public boolean hasNext() {
                        return delegate.hasNext();
                    }

                    @Override
                    public TimeValue next() {
                        if (false == delegate.hasNext()) {
                            return null;
                        }
                        task.countRetry();
                        return delegate.next();
                    }
                };
            }
        };
    }
}

@ -0,0 +1,238 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.index.mapper.internal.IdFieldMapper;
import org.elasticsearch.index.mapper.internal.IndexFieldMapper;
import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
import org.elasticsearch.index.mapper.internal.RoutingFieldMapper;
import org.elasticsearch.index.mapper.internal.SourceFieldMapper;
import org.elasticsearch.index.mapper.internal.TTLFieldMapper;
import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
import org.elasticsearch.index.mapper.internal.VersionFieldMapper;
import org.elasticsearch.script.CompiledScript;
import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHitField;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.HashMap;
import java.util.Map;
import java.util.Objects;

import static java.util.Collections.emptyMap;

/**
 * Abstract base for scrolling across a search and executing bulk indexes on all
 * results.
 */
public abstract class AbstractAsyncBulkIndexByScrollAction<
                Request extends AbstractBulkIndexByScrollRequest<Request>,
                Response extends BulkIndexByScrollResponse>
        extends AbstractAsyncBulkByScrollAction<Request, Response> {

    private final ScriptService scriptService;
    private final CompiledScript script;

    public AbstractAsyncBulkIndexByScrollAction(BulkByScrollTask task, ESLogger logger, ScriptService scriptService,
            Client client, ThreadPool threadPool, Request mainRequest, SearchRequest firstSearchRequest,
            ActionListener<Response> listener) {
        super(task, logger, client, threadPool, mainRequest, firstSearchRequest, listener);
        this.scriptService = scriptService;
        if (mainRequest.getScript() == null) {
            script = null;
        } else {
            script = scriptService.compile(mainRequest.getScript(), ScriptContext.Standard.UPDATE, emptyMap());
        }
    }

    /**
     * Build the IndexRequest for a single search hit. This shouldn't handle
     * metadata or the script. That will be handled by the copyMetadata and
     * applyScript functions, which can be overridden.
     */
    protected abstract IndexRequest buildIndexRequest(SearchHit doc);

    @Override
    protected BulkRequest buildBulk(Iterable<SearchHit> docs) {
        BulkRequest bulkRequest = new BulkRequest();
        ExecutableScript executableScript = null;
        Map<String, Object> scriptCtx = null;

        for (SearchHit doc : docs) {
            IndexRequest index = buildIndexRequest(doc);
            copyMetadata(index, doc);
            if (script != null) {
                if (executableScript == null) {
                    executableScript = scriptService.executable(script, mainRequest.getScript().getParams());
                    scriptCtx = new HashMap<>();
                }
                if (false == applyScript(index, doc, executableScript, scriptCtx)) {
                    continue;
                }
            }
            bulkRequest.add(index);
        }

        return bulkRequest;
    }

    /**
     * Copies the metadata from a hit to the index request.
     */
    protected void copyMetadata(IndexRequest index, SearchHit doc) {
        index.parent(fieldValue(doc, ParentFieldMapper.NAME));
        copyRouting(index, doc);
        // Comes back as a Long but needs to be a string
        Long timestamp = fieldValue(doc, TimestampFieldMapper.NAME);
        if (timestamp != null) {
            index.timestamp(timestamp.toString());
        }
        Long ttl = fieldValue(doc, TTLFieldMapper.NAME);
        if (ttl != null) {
            index.ttl(ttl);
        }
    }

    /**
     * Part of copyMetadata but called out individually for easy overriding.
     */
    protected void copyRouting(IndexRequest index, SearchHit doc) {
        index.routing(fieldValue(doc, RoutingFieldMapper.NAME));
    }

    protected <T> T fieldValue(SearchHit doc, String fieldName) {
        SearchHitField field = doc.field(fieldName);
        return field == null ? null : field.value();
    }

    /**
     * Apply a script to the request.
     *
     * @return is this request still ok to apply (true) or is it a noop (false)
     */
    @SuppressWarnings("unchecked")
    protected boolean applyScript(IndexRequest index, SearchHit doc, ExecutableScript script, final Map<String, Object> ctx) {
        if (script == null) {
            return true;
        }
        ctx.put(IndexFieldMapper.NAME, doc.index());
        ctx.put(TypeFieldMapper.NAME, doc.type());
        ctx.put(IdFieldMapper.NAME, doc.id());
        Long oldVersion = doc.getVersion();
        ctx.put(VersionFieldMapper.NAME, oldVersion);
        String oldParent = fieldValue(doc, ParentFieldMapper.NAME);
        ctx.put(ParentFieldMapper.NAME, oldParent);
        String oldRouting = fieldValue(doc, RoutingFieldMapper.NAME);
        ctx.put(RoutingFieldMapper.NAME, oldRouting);
        Long oldTimestamp = fieldValue(doc, TimestampFieldMapper.NAME);
        ctx.put(TimestampFieldMapper.NAME, oldTimestamp);
        Long oldTTL = fieldValue(doc, TTLFieldMapper.NAME);
        ctx.put(TTLFieldMapper.NAME, oldTTL);
        ctx.put(SourceFieldMapper.NAME, index.sourceAsMap());
        ctx.put("op", "update");
        script.setNextVar("ctx", ctx);
        script.run();
        Map<String, Object> resultCtx = (Map<String, Object>) script.unwrap(ctx);
        String newOp = (String) resultCtx.remove("op");
        if (newOp == null) {
            throw new IllegalArgumentException("Script cleared op!");
        }
        if ("noop".equals(newOp)) {
            task.countNoop();
            return false;
        }
        if (false == "update".equals(newOp)) {
            throw new IllegalArgumentException("Invalid op [" + newOp + ']');
        }

        /*
         * It'd be lovely to only set the source if we know it's been modified
         * but it isn't worth keeping two copies of it around just to check!
         */
        index.source((Map<String, Object>) resultCtx.remove(SourceFieldMapper.NAME));

        Object newValue = ctx.remove(IndexFieldMapper.NAME);
        if (false == doc.index().equals(newValue)) {
            scriptChangedIndex(index, newValue);
        }
        newValue = ctx.remove(TypeFieldMapper.NAME);
        if (false == doc.type().equals(newValue)) {
            scriptChangedType(index, newValue);
        }
        newValue = ctx.remove(IdFieldMapper.NAME);
        if (false == doc.id().equals(newValue)) {
            scriptChangedId(index, newValue);
        }
        newValue = ctx.remove(VersionFieldMapper.NAME);
        if (false == Objects.equals(oldVersion, newValue)) {
            scriptChangedVersion(index, newValue);
        }
        newValue = ctx.remove(ParentFieldMapper.NAME);
        if (false == Objects.equals(oldParent, newValue)) {
            scriptChangedParent(index, newValue);
        }
        /*
         * It's important that routing comes after parent in case you want to
         * change them both.
         */
        newValue = ctx.remove(RoutingFieldMapper.NAME);
        if (false == Objects.equals(oldRouting, newValue)) {
            scriptChangedRouting(index, newValue);
        }
        newValue = ctx.remove(TimestampFieldMapper.NAME);
        if (false == Objects.equals(oldTimestamp, newValue)) {
            scriptChangedTimestamp(index, newValue);
        }
        newValue = ctx.remove(TTLFieldMapper.NAME);
        if (false == Objects.equals(oldTTL, newValue)) {
            scriptChangedTTL(index, newValue);
        }
        if (false == ctx.isEmpty()) {
            throw new IllegalArgumentException("Invalid fields added to ctx [" + String.join(",", ctx.keySet()) + ']');
        }
        return true;
    }

    protected abstract void scriptChangedIndex(IndexRequest index, Object to);

    protected abstract void scriptChangedType(IndexRequest index, Object to);

    protected abstract void scriptChangedId(IndexRequest index, Object to);

    protected abstract void scriptChangedVersion(IndexRequest index, Object to);

    protected abstract void scriptChangedRouting(IndexRequest index, Object to);

    protected abstract void scriptChangedParent(IndexRequest index, Object to);

    protected abstract void scriptChangedTimestamp(IndexRequest index, Object to);

    protected abstract void scriptChangedTTL(IndexRequest index, Object to);
}

@ -0,0 +1,83 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.TransportAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.search.aggregations.AggregatorParsers;
import org.elasticsearch.tasks.LoggingTaskListener;
import org.elasticsearch.tasks.Task;

import java.io.IOException;

public abstract class AbstractBaseReindexRestHandler<Request extends ActionRequest<Request>, Response extends BulkIndexByScrollResponse,
        TA extends TransportAction<Request, Response>> extends BaseRestHandler {
    protected final IndicesQueriesRegistry indicesQueriesRegistry;
    protected final AggregatorParsers aggParsers;
    private final ClusterService clusterService;
    private final TA action;

    protected AbstractBaseReindexRestHandler(Settings settings, Client client,
            IndicesQueriesRegistry indicesQueriesRegistry, AggregatorParsers aggParsers, ClusterService clusterService, TA action) {
        super(settings, client);
        this.indicesQueriesRegistry = indicesQueriesRegistry;
        this.aggParsers = aggParsers;
        this.clusterService = clusterService;
        this.action = action;
    }

    protected void execute(RestRequest request, Request internalRequest, RestChannel channel) throws IOException {
        if (request.paramAsBoolean("wait_for_completion", true)) {
            action.execute(internalRequest, new BulkIndexByScrollResponseContentListener<Response>(channel));
            return;
        }
        /*
         * Let's try to validate before forking so the user gets some error. The
         * task can't totally validate until it starts but this is better than
         * nothing.
         */
        ActionRequestValidationException validationException = internalRequest.validate();
        if (validationException != null) {
            channel.sendResponse(new BytesRestResponse(channel, validationException));
            return;
        }
        Task task = action.execute(internalRequest, LoggingTaskListener.instance());
        sendTask(channel, task);
    }

    private void sendTask(RestChannel channel, Task task) throws IOException {
        XContentBuilder builder = channel.newBuilder();
        builder.startObject();
        builder.field("task", clusterService.localNode().getId() + ":" + task.getId());
        builder.endObject();
        channel.sendResponse(new BytesRestResponse(RestStatus.OK, builder));
    }
}

@ -0,0 +1,301 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.WriteConsistencyLevel;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.tasks.Task;

import java.io.IOException;
import java.util.Arrays;

import static org.elasticsearch.action.ValidateActions.addValidationError;
import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes;

public abstract class AbstractBulkByScrollRequest<Self extends AbstractBulkByScrollRequest<Self>>
        extends ActionRequest<Self> {
    public static final int SIZE_ALL_MATCHES = -1;
    private static final TimeValue DEFAULT_SCROLL_TIMEOUT = timeValueMinutes(5);
    private static final int DEFAULT_SCROLL_SIZE = 100;

    /**
     * The search to be executed.
     */
    private SearchRequest searchRequest;

    /**
     * Maximum number of processed documents. Defaults to -1 meaning process all
     * documents.
     */
    private int size = SIZE_ALL_MATCHES;

    /**
     * Should version conflicts cause aborts? Defaults to true.
     */
    private boolean abortOnVersionConflict = true;

    /**
     * Call refresh on the indexes we've written to after the request ends?
     */
    private boolean refresh = false;

    /**
     * Timeout to wait for the shards to be available for each bulk request.
     */
    private TimeValue timeout = ReplicationRequest.DEFAULT_TIMEOUT;

    /**
     * Consistency level for write requests.
     */
    private WriteConsistencyLevel consistency = WriteConsistencyLevel.DEFAULT;

    /**
     * Initial delay after a rejection before retrying a bulk request. With the default maxRetries the total backoff for retrying rejections
     * is about one minute per bulk request. Once the entire bulk request is successful the retry counter resets.
     */
    private TimeValue retryBackoffInitialTime = timeValueMillis(500);

    /**
     * Total number of retries attempted for rejections. There is no way to ask for unlimited retries.
     */
    private int maxRetries = 11;

    public AbstractBulkByScrollRequest() {
    }

    public AbstractBulkByScrollRequest(SearchRequest source) {
        this.searchRequest = source;

        // Set the defaults which differ from SearchRequest's defaults.
        source.scroll(DEFAULT_SCROLL_TIMEOUT);
        source.source(new SearchSourceBuilder());
        source.source().version(true);
        source.source().size(DEFAULT_SCROLL_SIZE);
    }

    /**
     * `this` cast to Self. Used for building fluent methods without cast
     * warnings.
     */
    protected abstract Self self();

    @Override
    public ActionRequestValidationException validate() {
        ActionRequestValidationException e = searchRequest.validate();
        if (searchRequest.source().from() != -1) {
            e = addValidationError("from is not supported in this context", e);
        }
        if (maxRetries < 0) {
            e = addValidationError("retries cannot be negative", e);
        }
        if (false == (size == -1 || size > 0)) {
            e = addValidationError(
                    "size should be greater than 0 if the request is limited to some number of documents or -1 if it isn't but it was ["
                            + size + "]",
                    e);
        }
        return e;
    }

    /**
     * Maximum number of processed documents. Defaults to -1 meaning process all
     * documents.
     */
    public int getSize() {
        return size;
    }

    /**
     * Maximum number of processed documents. Defaults to -1 meaning process all
     * documents.
     */
    public Self setSize(int size) {
        this.size = size;
        return self();
    }

    /**
     * Should version conflicts cause aborts? Defaults to true.
     */
    public boolean isAbortOnVersionConflict() {
        return abortOnVersionConflict;
    }

    /**
     * Should version conflicts cause aborts? Defaults to true.
     */
    public Self setAbortOnVersionConflict(boolean abortOnVersionConflict) {
        this.abortOnVersionConflict = abortOnVersionConflict;
        return self();
    }

    /**
     * Sets abortOnVersionConflict based on REST-friendly names.
     */
    public void setConflicts(String conflicts) {
        switch (conflicts) {
        case "proceed":
            setAbortOnVersionConflict(false);
            return;
        case "abort":
            setAbortOnVersionConflict(true);
            return;
        default:
            throw new IllegalArgumentException("conflicts may only be \"proceed\" or \"abort\" but was [" + conflicts + "]");
        }
    }

    /**
     * The search request that matches the documents to process.
     */
    public SearchRequest getSearchRequest() {
        return searchRequest;
    }

    /**
     * Call refresh on the indexes we've written to after the request ends?
     */
    public boolean isRefresh() {
        return refresh;
    }

    /**
     * Call refresh on the indexes we've written to after the request ends?
     */
    public Self setRefresh(boolean refresh) {
        this.refresh = refresh;
        return self();
    }

    /**
     * Timeout to wait for the shards to be available for each bulk request.
     */
    public TimeValue getTimeout() {
        return timeout;
    }

    /**
     * Timeout to wait for the shards to be available for each bulk request.
     */
    public Self setTimeout(TimeValue timeout) {
        this.timeout = timeout;
        return self();
    }

    /**
     * Consistency level for write requests.
     */
    public WriteConsistencyLevel getConsistency() {
        return consistency;
    }

    /**
     * Consistency level for write requests.
     */
    public Self setConsistency(WriteConsistencyLevel consistency) {
        this.consistency = consistency;
        return self();
    }

    /**
     * Initial delay after a rejection before retrying request.
     */
    public TimeValue getRetryBackoffInitialTime() {
        return retryBackoffInitialTime;
    }

    /**
     * Set the initial delay after a rejection before retrying request.
     */
    public Self setRetryBackoffInitialTime(TimeValue retryBackoffInitialTime) {
        this.retryBackoffInitialTime = retryBackoffInitialTime;
        return self();
    }

    /**
     * Total number of retries attempted for rejections.
     */
    public int getMaxRetries() {
        return maxRetries;
    }

    /**
     * Set the total number of retries attempted for rejections. There is no way to ask for unlimited retries.
     */
    public Self setMaxRetries(int maxRetries) {
        this.maxRetries = maxRetries;
        return self();
    }

    @Override
    public Task createTask(long id, String type, String action) {
        return new BulkByScrollTask(id, type, action, getDescription());
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        searchRequest = new SearchRequest();
        searchRequest.readFrom(in);
        abortOnVersionConflict = in.readBoolean();
        size = in.readVInt();
        refresh = in.readBoolean();
        timeout = TimeValue.readTimeValue(in);
        consistency = WriteConsistencyLevel.fromId(in.readByte());
        retryBackoffInitialTime = TimeValue.readTimeValue(in);
        maxRetries = in.readVInt();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        searchRequest.writeTo(out);
        out.writeBoolean(abortOnVersionConflict);
        out.writeVInt(size);
        out.writeBoolean(refresh);
        timeout.writeTo(out);
        out.writeByte(consistency.id());
        retryBackoffInitialTime.writeTo(out);
        out.writeVInt(maxRetries);
    }

    /**
     * Append a short description of the search request to a StringBuilder. Used
     * to make toString.
     */
    protected void searchToString(StringBuilder b) {
        if (searchRequest.indices() != null && searchRequest.indices().length != 0) {
            b.append(Arrays.toString(searchRequest.indices()));
        } else {
            b.append("[all indices]");
        }
        if (searchRequest.types() != null && searchRequest.types().length != 0) {
            b.append(Arrays.toString(searchRequest.types()));
        }
    }
}

@ -0,0 +1,109 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.WriteConsistencyLevel;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.query.QueryBuilder;

public abstract class AbstractBulkByScrollRequestBuilder<
                Request extends AbstractBulkByScrollRequest<Request>,
                Response extends ActionResponse,
                Self extends AbstractBulkByScrollRequestBuilder<Request, Response, Self>>
        extends ActionRequestBuilder<Request, Response, Self> {
    private final SearchRequestBuilder source;

    protected AbstractBulkByScrollRequestBuilder(ElasticsearchClient client,
            Action<Request, Response, Self> action, SearchRequestBuilder source, Request request) {
        super(client, action, request);
        this.source = source;
    }

    protected abstract Self self();

    /**
     * The search used to find documents to process.
     */
    public SearchRequestBuilder source() {
        return source;
    }

    /**
     * Set the source indices.
     */
    public Self source(String... indices) {
        source.setIndices(indices);
        return self();
    }

    /**
     * Set the query that will filter the source. Just a convenience method for
     * easy chaining.
     */
    public Self filter(QueryBuilder<?> filter) {
        source.setQuery(filter);
        return self();
    }

    /**
     * The maximum number of documents to attempt.
     */
    public Self size(int size) {
        request.setSize(size);
        return self();
    }

    /**
     * Should version conflicts cause the action to abort?
     */
    public Self abortOnVersionConflict(boolean abortOnVersionConflict) {
        request.setAbortOnVersionConflict(abortOnVersionConflict);
        return self();
    }

    /**
     * Call refresh on the indexes we've written to after the request ends?
     */
    public Self refresh(boolean refresh) {
        request.setRefresh(refresh);
        return self();
    }

    /**
     * Timeout to wait for the shards to be available for each bulk request.
     */
    public Self timeout(TimeValue timeout) {
        request.setTimeout(timeout);
        return self();
    }

    /**
     * Consistency level for write requests.
     */
    public Self consistency(WriteConsistencyLevel consistency) {
        request.setConsistency(consistency);
        return self();
    }
}

@ -0,0 +1,80 @@
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.reindex;
|
||||
|
||||
import org.elasticsearch.action.search.SearchRequest;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.script.Script;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
public abstract class AbstractBulkIndexByScrollRequest<Self extends AbstractBulkIndexByScrollRequest<Self>>
|
||||
extends AbstractBulkByScrollRequest<Self> {
|
||||
/**
|
||||
* Script to modify the documents before they are processed.
|
||||
*/
|
||||
private Script script;
|
||||
|
||||
public AbstractBulkIndexByScrollRequest() {
|
||||
}
|
||||
|
||||
public AbstractBulkIndexByScrollRequest(SearchRequest source) {
|
||||
super(source);
|
||||
}
|
||||
|
||||
/**
|
||||
* Script to modify the documents before they are processed.
|
||||
*/
|
||||
public Script getScript() {
|
||||
return script;
|
||||
}
|
||||
|
||||
/**
|
||||
* Script to modify the documents before they are processed.
|
||||
*/
|
||||
public Self setScript(@Nullable Script script) {
|
||||
this.script = script;
|
||||
return self();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
if (in.readBoolean()) {
|
||||
script = Script.readScript(in);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
out.writeOptionalStreamable(script);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void searchToString(StringBuilder b) {
|
||||
super.searchToString(b);
|
||||
if (script != null) {
|
||||
b.append(" updated with [").append(script).append(']');
|
||||
}
|
||||
}
|
||||
}
|
|
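The read and write sides above are deliberately asymmetric: writeOptionalStreamable emits a presence boolean before the value, which readFrom mirrors by hand with readBoolean(). A minimal round-trip sketch of that wire pattern under those assumptions; the helper class is invented for illustration:

package org.elasticsearch.index.reindex;

import java.io.IOException;

import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.script.Script;

class OptionalScriptWireSketch {
    // Round-trips a nullable Script the same way the request above does.
    static Script roundTrip(Script script) throws IOException {
        BytesStreamOutput out = new BytesStreamOutput();
        out.writeOptionalStreamable(script);              // presence boolean, then the script
        StreamInput in = out.bytes().streamInput();
        return in.readBoolean() ? Script.readScript(in) : null;
    }
}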
@@ -0,0 +1,46 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.script.Script;

public abstract class AbstractBulkIndexByScrollRequestBuilder<
                Request extends AbstractBulkIndexByScrollRequest<Request>,
                Response extends ActionResponse,
                Self extends AbstractBulkIndexByScrollRequestBuilder<Request, Response, Self>>
        extends AbstractBulkByScrollRequestBuilder<Request, Response, Self> {

    protected AbstractBulkIndexByScrollRequestBuilder(ElasticsearchClient client,
            Action<Request, Response, Self> action, SearchRequestBuilder search, Request request) {
        super(client, action, search, request);
    }

    /**
     * Script to modify the documents before they are processed.
     */
    public Self script(Script script) {
        request.setScript(script);
        return self();
    }
}
@@ -0,0 +1,290 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.tasks.CancellableTask;
import org.elasticsearch.tasks.Task;

import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;

/**
 * Task storing information about a currently running BulkByScroll request.
 */
public class BulkByScrollTask extends CancellableTask {
    /**
     * The total number of documents this request will process. 0 means we don't yet know or, possibly, there are actually 0 documents
     * to process. It's OK that these have the same meaning because any request with 0 actual documents should be quite short lived.
     */
    private final AtomicLong total = new AtomicLong(0);
    private final AtomicLong updated = new AtomicLong(0);
    private final AtomicLong created = new AtomicLong(0);
    private final AtomicLong deleted = new AtomicLong(0);
    private final AtomicLong noops = new AtomicLong(0);
    private final AtomicInteger batch = new AtomicInteger(0);
    private final AtomicLong versionConflicts = new AtomicLong(0);
    private final AtomicLong retries = new AtomicLong(0);

    public BulkByScrollTask(long id, String type, String action, String description) {
        super(id, type, action, description);
    }

    @Override
    public Status getStatus() {
        return new Status(total.get(), updated.get(), created.get(), deleted.get(), batch.get(), versionConflicts.get(), noops.get(),
                retries.get(), getReasonCancelled());
    }

    /**
     * Total number of successfully processed documents.
     */
    public long getSuccessfullyProcessed() {
        return updated.get() + created.get() + deleted.get();
    }

    public static class Status implements Task.Status {
        public static final Status PROTOTYPE = new Status(0, 0, 0, 0, 0, 0, 0, 0, null);

        private final long total;
        private final long updated;
        private final long created;
        private final long deleted;
        private final int batches;
        private final long versionConflicts;
        private final long noops;
        private final long retries;
        private final String reasonCancelled;

        public Status(long total, long updated, long created, long deleted, int batches, long versionConflicts, long noops, long retries,
                @Nullable String reasonCancelled) {
            this.total = checkPositive(total, "total");
            this.updated = checkPositive(updated, "updated");
            this.created = checkPositive(created, "created");
            this.deleted = checkPositive(deleted, "deleted");
            this.batches = checkPositive(batches, "batches");
            this.versionConflicts = checkPositive(versionConflicts, "versionConflicts");
            this.noops = checkPositive(noops, "noops");
            this.retries = checkPositive(retries, "retries");
            this.reasonCancelled = reasonCancelled;
        }

        public Status(StreamInput in) throws IOException {
            total = in.readVLong();
            updated = in.readVLong();
            created = in.readVLong();
            deleted = in.readVLong();
            batches = in.readVInt();
            versionConflicts = in.readVLong();
            noops = in.readVLong();
            retries = in.readVLong();
            reasonCancelled = in.readOptionalString();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeVLong(total);
            out.writeVLong(updated);
            out.writeVLong(created);
            out.writeVLong(deleted);
            out.writeVInt(batches);
            out.writeVLong(versionConflicts);
            out.writeVLong(noops);
            out.writeVLong(retries);
            out.writeOptionalString(reasonCancelled);
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            builder.startObject();
            innerXContent(builder, params, true, true);
            return builder.endObject();
        }

        public XContentBuilder innerXContent(XContentBuilder builder, Params params, boolean includeCreated, boolean includeDeleted)
                throws IOException {
            builder.field("total", total);
            builder.field("updated", updated);
            if (includeCreated) {
                builder.field("created", created);
            }
            if (includeDeleted) {
                builder.field("deleted", deleted);
            }
            builder.field("batches", batches);
            builder.field("version_conflicts", versionConflicts);
            builder.field("noops", noops);
            builder.field("retries", retries);
            if (reasonCancelled != null) {
                builder.field("canceled", reasonCancelled);
            }
            return builder;
        }

        @Override
        public String toString() {
            StringBuilder builder = new StringBuilder();
            builder.append("BulkIndexByScrollResponse[");
            innerToString(builder, true, true);
            return builder.append(']').toString();
        }

        public void innerToString(StringBuilder builder, boolean includeCreated, boolean includeDeleted) {
            builder.append("updated=").append(updated);
            if (includeCreated) {
                builder.append(",created=").append(created);
            }
            if (includeDeleted) {
                builder.append(",deleted=").append(deleted);
            }
            builder.append(",batches=").append(batches);
            builder.append(",versionConflicts=").append(versionConflicts);
            builder.append(",noops=").append(noops);
            builder.append(",retries=").append(retries);
            if (reasonCancelled != null) {
                builder.append(",canceled=").append(reasonCancelled);
            }
        }

        @Override
        public String getWriteableName() {
            return "bulk-by-scroll";
        }

        @Override
        public Status readFrom(StreamInput in) throws IOException {
            return new Status(in);
        }

        /**
         * The total number of documents this request will process. 0 means we don't yet know or, possibly, there are actually 0 documents
         * to process. It's OK that these have the same meaning because any request with 0 actual documents should be quite short lived.
         */
        public long getTotal() {
            return total;
        }

        /**
         * Count of documents updated.
         */
        public long getUpdated() {
            return updated;
        }

        /**
         * Count of documents created.
         */
        public long getCreated() {
            return created;
        }

        /**
         * Count of successful delete operations.
         */
        public long getDeleted() {
            return deleted;
        }

        /**
         * Number of scan responses this request has processed.
         */
        public int getBatches() {
            return batches;
        }

        /**
         * Number of version conflicts this request has hit.
         */
        public long getVersionConflicts() {
            return versionConflicts;
        }

        /**
         * Number of noops (skipped bulk items) as part of this request.
         */
        public long getNoops() {
            return noops;
        }

        /**
         * Number of retries that had to be attempted due to rejected executions.
         */
        public long getRetries() {
            return retries;
        }

        /**
         * The reason that the request was canceled or null if it hasn't been.
         */
        public String getReasonCancelled() {
            return reasonCancelled;
        }

        private int checkPositive(int value, String name) {
            if (value < 0) {
                throw new IllegalArgumentException(name + " must be greater than or equal to 0 but was [" + value + "]");
            }
            return value;
        }

        private long checkPositive(long value, String name) {
            if (value < 0) {
                throw new IllegalArgumentException(name + " must be greater than or equal to 0 but was [" + value + "]");
            }
            return value;
        }
    }

    void setTotal(long totalHits) {
        total.set(totalHits);
    }

    void countBatch() {
        batch.incrementAndGet();
    }

    void countNoop() {
        noops.incrementAndGet();
    }

    void countCreated() {
        created.incrementAndGet();
    }

    void countUpdated() {
        updated.incrementAndGet();
    }

    void countDeleted() {
        deleted.incrementAndGet();
    }

    void countVersionConflict() {
        versionConflicts.incrementAndGet();
    }

    void countRetry() {
        retries.incrementAndGet();
    }
}
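A small sketch of the intended flow: the worker bumps the package-private counters as it processes scroll batches, while getStatus() can be called concurrently to materialize an immutable point-in-time snapshot. The scenario below is invented for illustration and uses only methods shown above:

package org.elasticsearch.index.reindex;

class TaskStatusSketch {
    static BulkByScrollTask.Status snapshot(BulkByScrollTask task) {
        task.countBatch();           // a scroll batch was pulled back
        task.countCreated();         // one of its documents was created
        task.countVersionConflict(); // another hit a version conflict
        return task.getStatus();     // immutable view of the counters
    }
}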
@@ -0,0 +1,169 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import static java.lang.Math.min;
import static java.util.Collections.unmodifiableList;
import static java.util.Objects.requireNonNull;
import static org.elasticsearch.action.search.ShardSearchFailure.readShardSearchFailure;

/**
 * Response used for actions that index many documents using a scroll request.
 */
public class BulkIndexByScrollResponse extends ActionResponse implements ToXContent {
    private TimeValue took;
    private BulkByScrollTask.Status status;
    private List<Failure> indexingFailures;
    private List<ShardSearchFailure> searchFailures;

    public BulkIndexByScrollResponse() {
    }

    public BulkIndexByScrollResponse(TimeValue took, BulkByScrollTask.Status status, List<Failure> indexingFailures,
            List<ShardSearchFailure> searchFailures) {
        this.took = took;
        this.status = requireNonNull(status, "Null status not supported");
        this.indexingFailures = indexingFailures;
        this.searchFailures = searchFailures;
    }

    public TimeValue getTook() {
        return took;
    }

    protected BulkByScrollTask.Status getStatus() {
        return status;
    }

    public long getUpdated() {
        return status.getUpdated();
    }

    public int getBatches() {
        return status.getBatches();
    }

    public long getVersionConflicts() {
        return status.getVersionConflicts();
    }

    public long getNoops() {
        return status.getNoops();
    }

    /**
     * The reason that the request was canceled or null if it hasn't been.
     */
    public String getReasonCancelled() {
        return status.getReasonCancelled();
    }

    /**
     * All of the indexing failures. Version conflicts are only included if the request sets abortOnVersionConflict to true (the
     * default).
     */
    public List<Failure> getIndexingFailures() {
        return indexingFailures;
    }

    /**
     * All search failures.
     */
    public List<ShardSearchFailure> getSearchFailures() {
        return searchFailures;
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        took.writeTo(out);
        status.writeTo(out);
        out.writeVInt(indexingFailures.size());
        for (Failure failure: indexingFailures) {
            failure.writeTo(out);
        }
        out.writeVInt(searchFailures.size());
        for (ShardSearchFailure failure: searchFailures) {
            failure.writeTo(out);
        }
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        took = TimeValue.readTimeValue(in);
        status = new BulkByScrollTask.Status(in);
        int indexingFailuresCount = in.readVInt();
        List<Failure> indexingFailures = new ArrayList<>(indexingFailuresCount);
        for (int i = 0; i < indexingFailuresCount; i++) {
            indexingFailures.add(Failure.PROTOTYPE.readFrom(in));
        }
        this.indexingFailures = unmodifiableList(indexingFailures);
        int searchFailuresCount = in.readVInt();
        List<ShardSearchFailure> searchFailures = new ArrayList<>(searchFailuresCount);
        for (int i = 0; i < searchFailuresCount; i++) {
            searchFailures.add(readShardSearchFailure(in));
        }
        this.searchFailures = unmodifiableList(searchFailures);
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.field("took", took.millis());
        status.innerXContent(builder, params, false, false);
        builder.startArray("failures");
        for (Failure failure: indexingFailures) {
            builder.startObject();
            failure.toXContent(builder, params);
            builder.endObject();
        }
        for (ShardSearchFailure failure: searchFailures) {
            builder.startObject();
            failure.toXContent(builder, params);
            builder.endObject();
        }
        builder.endArray();
        return builder;
    }

    @Override
    public String toString() {
        StringBuilder builder = new StringBuilder();
        builder.append("BulkIndexByScrollResponse[");
        builder.append("took=").append(took).append(',');
        status.innerToString(builder, false, false);
        builder.append(",indexing_failures=").append(getIndexingFailures().subList(0, min(3, getIndexingFailures().size())));
        builder.append(",search_failures=").append(getSearchFailures().subList(0, min(3, getSearchFailures().size())));
        return builder.append(']').toString();
    }
}
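Both failure lists travel over the wire in the usual length-prefixed form: a vint count followed by the elements, with Failure.PROTOTYPE.readFrom standing in for a stream constructor on the read side. A minimal sketch of just that encoding, factored into a hypothetical helper:

package org.elasticsearch.index.reindex;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

class FailureListWireSketch {
    static void write(StreamOutput out, List<Failure> failures) throws IOException {
        out.writeVInt(failures.size());  // element count first
        for (Failure failure : failures) {
            failure.writeTo(out);        // then each element in order
        }
    }

    static List<Failure> read(StreamInput in) throws IOException {
        int count = in.readVInt();
        List<Failure> failures = new ArrayList<>(count);
        for (int i = 0; i < count; i++) {
            failures.add(Failure.PROTOTYPE.readFrom(in)); // prototype decodes each element
        }
        return failures;
    }
}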
@@ -0,0 +1,46 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.rest.action.support.RestToXContentListener;

/**
 * Just like RestToXContentListener but returns a status higher than 200 if
 * there are any failures.
 */
public class BulkIndexByScrollResponseContentListener<R extends BulkIndexByScrollResponse> extends RestToXContentListener<R> {
    public BulkIndexByScrollResponseContentListener(RestChannel channel) {
        super(channel);
    }

    @Override
    protected RestStatus getStatus(R response) {
        RestStatus status = RestStatus.OK;
        for (Failure failure : response.getIndexingFailures()) {
            if (failure.getStatus().getStatus() > status.getStatus()) {
                status = failure.getStatus();
            }
        }
        return status;
    }
}
@@ -0,0 +1,42 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.Action;
import org.elasticsearch.client.ElasticsearchClient;

public class ReindexAction extends Action<ReindexRequest, ReindexResponse, ReindexRequestBuilder> {
    public static final ReindexAction INSTANCE = new ReindexAction();
    public static final String NAME = "indices:data/write/reindex";

    private ReindexAction() {
        super(NAME);
    }

    @Override
    public ReindexRequestBuilder newRequestBuilder(ElasticsearchClient client) {
        return new ReindexRequestBuilder(client, this);
    }

    @Override
    public ReindexResponse newResponse() {
        return new ReindexResponse();
    }
}
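Callers reach the transport action through this singleton. A minimal invocation sketch, assuming a prepared ReindexRequest; the listener bodies are placeholders:

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.ElasticsearchClient;

class ReindexInvokeSketch {
    static void run(ElasticsearchClient client, ReindexRequest request) {
        // The client dispatches by ReindexAction.NAME to TransportReindexAction.
        client.execute(ReindexAction.INSTANCE, request, new ActionListener<ReindexResponse>() {
            @Override
            public void onResponse(ReindexResponse response) {
                // e.g. inspect response.getCreated() and the failure lists
            }

            @Override
            public void onFailure(Throwable e) {
                // validation or transport failure
            }
        });
    }
}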
@@ -0,0 +1,48 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.ActionModule;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.plugins.Plugin;

public class ReindexPlugin extends Plugin {
    public static final String NAME = "reindex";

    @Override
    public String name() {
        return NAME;
    }

    @Override
    public String description() {
        return "The Reindex module adds APIs to reindex from one index to another or update documents in place.";
    }

    public void onModule(ActionModule actionModule) {
        actionModule.registerAction(ReindexAction.INSTANCE, TransportReindexAction.class);
        actionModule.registerAction(UpdateByQueryAction.INSTANCE, TransportUpdateByQueryAction.class);
    }

    public void onModule(NetworkModule restModule) {
        restModule.registerRestHandler(RestReindexAction.class);
        restModule.registerRestHandler(RestUpdateByQueryAction.class);
    }
}
@@ -0,0 +1,126 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lucene.uid.Versions;

import java.io.IOException;

import static org.elasticsearch.action.ValidateActions.addValidationError;
import static org.elasticsearch.index.VersionType.INTERNAL;

public class ReindexRequest extends AbstractBulkIndexByScrollRequest<ReindexRequest> {
    /**
     * Prototype for index requests.
     */
    private IndexRequest destination;

    public ReindexRequest() {
    }

    public ReindexRequest(SearchRequest search, IndexRequest destination) {
        super(search);
        this.destination = destination;
    }

    @Override
    protected ReindexRequest self() {
        return this;
    }

    @Override
    public ActionRequestValidationException validate() {
        ActionRequestValidationException e = super.validate();
        if (getSearchRequest().indices() == null || getSearchRequest().indices().length == 0) {
            e = addValidationError("use _all if you really want to copy from all existing indexes", e);
        }
        /*
         * Note that we don't call index's validator - it won't work because
         * we'll be filling in portions of it as we receive the docs. But we can
         * validate some things so we do that below.
         */
        if (destination.index() == null) {
            e = addValidationError("index must be specified", e);
            return e;
        }
        if (false == routingIsValid()) {
            e = addValidationError("routing must be unset, [keep], [discard] or [=<some new value>]", e);
        }
        if (destination.versionType() == INTERNAL) {
            if (destination.version() != Versions.MATCH_ANY && destination.version() != Versions.MATCH_DELETED) {
                e = addValidationError("unsupported version for internal versioning [" + destination.version() + ']', e);
            }
        }
        if (destination.ttl() != null) {
            e = addValidationError("setting ttl on destination isn't supported. use scripts instead.", e);
        }
        if (destination.timestamp() != null) {
            e = addValidationError("setting timestamp on destination isn't supported. use scripts instead.", e);
        }
        return e;
    }

    private boolean routingIsValid() {
        if (destination.routing() == null || destination.routing().startsWith("=")) {
            return true;
        }
        switch (destination.routing()) {
        case "keep":
        case "discard":
            return true;
        default:
            return false;
        }
    }

    public IndexRequest getDestination() {
        return destination;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        destination = new IndexRequest();
        destination.readFrom(in);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        destination.writeTo(out);
    }

    @Override
    public String toString() {
        StringBuilder b = new StringBuilder();
        b.append("reindex from ");
        searchToString(b);
        b.append(" to [").append(destination.index()).append(']');
        if (destination.type() != null) {
            b.append('[').append(destination.type()).append(']');
        }
        return b.toString();
    }
}
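routingIsValid() accepts exactly four shapes of routing spec. A few illustrative values and how validate() treats them; the literals are examples only:

package org.elasticsearch.index.reindex;

class RoutingSpecSketch {
    static void examples(ReindexRequest request) {
        request.getDestination().routing(null);       // valid: routing left unset
        request.getDestination().routing("keep");     // valid: preserve each doc's routing
        request.getDestination().routing("discard");  // valid: drop routing on the copies
        request.getDestination().routing("=shard5");  // valid: force routing to "shard5"
        request.getDestination().routing("cat");      // invalid: validate() rejects it
    }
}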
@@ -0,0 +1,70 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.Action;
import org.elasticsearch.action.index.IndexAction;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchAction;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;

public class ReindexRequestBuilder extends
        AbstractBulkIndexByScrollRequestBuilder<ReindexRequest, ReindexResponse, ReindexRequestBuilder> {
    private final IndexRequestBuilder destination;

    public ReindexRequestBuilder(ElasticsearchClient client,
            Action<ReindexRequest, ReindexResponse, ReindexRequestBuilder> action) {
        this(client, action, new SearchRequestBuilder(client, SearchAction.INSTANCE),
                new IndexRequestBuilder(client, IndexAction.INSTANCE));
    }

    private ReindexRequestBuilder(ElasticsearchClient client,
            Action<ReindexRequest, ReindexResponse, ReindexRequestBuilder> action,
            SearchRequestBuilder search, IndexRequestBuilder destination) {
        super(client, action, search, new ReindexRequest(search.request(), destination.request()));
        this.destination = destination;
    }

    @Override
    protected ReindexRequestBuilder self() {
        return this;
    }

    public IndexRequestBuilder destination() {
        return destination;
    }

    /**
     * Set the destination index.
     */
    public ReindexRequestBuilder destination(String index) {
        destination.setIndex(index);
        return this;
    }

    /**
     * Set the destination index and type.
     */
    public ReindexRequestBuilder destination(String index, String type) {
        destination.setIndex(index).setType(type);
        return this;
    }
}
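A minimal sketch of assembling a reindex through this builder. A source() accessor exposing the search builder is assumed from the superclass constructor wiring (it is not shown in this hunk), and the index names are placeholders:

package org.elasticsearch.index.reindex;

import org.elasticsearch.client.ElasticsearchClient;

class ReindexBuilderSketch {
    static ReindexRequest build(ElasticsearchClient client) {
        ReindexRequestBuilder copy = ReindexAction.INSTANCE.newRequestBuilder(client);
        copy.source().setIndices("old_index");     // assumed accessor for the scrolled search
        copy.destination("new_index", "new_type"); // where the matched docs are written
        copy.size(500);                            // copy at most 500 documents
        return copy.request();
    }
}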
@@ -0,0 +1,73 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.reindex.BulkByScrollTask.Status;

import java.io.IOException;
import java.util.List;

/**
 * Response for the ReindexAction.
 */
public class ReindexResponse extends BulkIndexByScrollResponse {
    public ReindexResponse() {
    }

    public ReindexResponse(TimeValue took, Status status, List<Failure> indexingFailures, List<ShardSearchFailure> searchFailures) {
        super(took, status, indexingFailures, searchFailures);
    }

    public long getCreated() {
        return getStatus().getCreated();
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.field("took", getTook());
        getStatus().innerXContent(builder, params, true, false);
        builder.startArray("failures");
        for (Failure failure: getIndexingFailures()) {
            builder.startObject();
            failure.toXContent(builder, params);
            builder.endObject();
        }
        for (ShardSearchFailure failure: getSearchFailures()) {
            builder.startObject();
            failure.toXContent(builder, params);
            builder.endObject();
        }
        builder.endArray();
        return builder;
    }

    @Override
    public String toString() {
        StringBuilder builder = new StringBuilder();
        builder.append("ReindexResponse[");
        builder.append("took=").append(getTook()).append(',');
        getStatus().innerToString(builder, true, false);
        return builder.append(']').toString();
    }
}
@@ -0,0 +1,178 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.WriteConsistencyLevel;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.aggregations.AggregatorParsers;

import java.io.IOException;
import java.util.List;
import java.util.Map;

import static org.elasticsearch.common.unit.TimeValue.parseTimeValue;
import static org.elasticsearch.rest.RestRequest.Method.POST;
import static org.elasticsearch.rest.RestStatus.BAD_REQUEST;

/**
 * Expose IndexBySearchRequest over REST.
 */
public class RestReindexAction extends AbstractBaseReindexRestHandler<ReindexRequest, ReindexResponse, TransportReindexAction> {
    private static final ObjectParser<ReindexRequest, ReindexParseContext> PARSER = new ObjectParser<>("reindex");
    static {
        ObjectParser.Parser<SearchRequest, ReindexParseContext> sourceParser = (parser, search, context) -> {
            /*
             * Extract the parameters that we need from the parser. We could do
             * away with this hack when search source has an ObjectParser.
             */
            Map<String, Object> source = parser.map();
            String[] indices = extractStringArray(source, "index");
            if (indices != null) {
                search.indices(indices);
            }
            String[] types = extractStringArray(source, "type");
            if (types != null) {
                search.types(types);
            }
            XContentBuilder builder = XContentFactory.contentBuilder(parser.contentType());
            builder.map(source);
            parser = parser.contentType().xContent().createParser(builder.bytes());
            context.queryParseContext.reset(parser);
            search.source().parseXContent(parser, context.queryParseContext, context.aggParsers);
        };

        ObjectParser<IndexRequest, Void> destParser = new ObjectParser<>("dest");
        destParser.declareString(IndexRequest::index, new ParseField("index"));
        destParser.declareString(IndexRequest::type, new ParseField("type"));
        destParser.declareString(IndexRequest::routing, new ParseField("routing"));
        destParser.declareString(IndexRequest::opType, new ParseField("opType"));
        destParser.declareString((s, i) -> s.versionType(VersionType.fromString(i)), new ParseField("versionType"));

        // These exist just so the user can get a nice validation error:
        destParser.declareString(IndexRequest::timestamp, new ParseField("timestamp"));
        destParser.declareString((i, ttl) -> i.ttl(parseTimeValue(ttl, TimeValue.timeValueMillis(-1), "ttl").millis()),
                new ParseField("ttl"));

        PARSER.declareField((p, v, c) -> sourceParser.parse(p, v.getSearchRequest(), c), new ParseField("source"), ValueType.OBJECT);
        PARSER.declareField((p, v, c) -> destParser.parse(p, v.getDestination(), null), new ParseField("dest"), ValueType.OBJECT);
        PARSER.declareInt(ReindexRequest::setSize, new ParseField("size"));
        PARSER.declareField((p, v, c) -> v.setScript(Script.parse(p, c.queryParseContext.parseFieldMatcher())), new ParseField("script"),
                ValueType.OBJECT);
        PARSER.declareString(ReindexRequest::setConflicts, new ParseField("conflicts"));
    }

    @Inject
    public RestReindexAction(Settings settings, RestController controller, Client client,
            IndicesQueriesRegistry indicesQueriesRegistry, AggregatorParsers aggParsers, ClusterService clusterService,
            TransportReindexAction action) {
        super(settings, client, indicesQueriesRegistry, aggParsers, clusterService, action);
        controller.registerHandler(POST, "/_reindex", this);
    }

    @Override
    public void handleRequest(RestRequest request, RestChannel channel, Client client) throws IOException {
        if (false == request.hasContent()) {
            badRequest(channel, "body required");
            return;
        }

        ReindexRequest internalRequest = new ReindexRequest(new SearchRequest(), new IndexRequest());

        try (XContentParser xcontent = XContentFactory.xContent(request.content()).createParser(request.content())) {
            PARSER.parse(xcontent, internalRequest, new ReindexParseContext(new QueryParseContext(indicesQueriesRegistry), aggParsers));
        } catch (ParsingException e) {
            logger.warn("Bad request", e);
            badRequest(channel, e.getDetailedMessage());
            return;
        }
        parseCommon(internalRequest, request);

        execute(request, internalRequest, channel);
    }

    private void badRequest(RestChannel channel, String message) {
        try {
            XContentBuilder builder = channel.newErrorBuilder();
            channel.sendResponse(new BytesRestResponse(BAD_REQUEST, builder.startObject().field("error", message).endObject()));
        } catch (IOException e) {
            logger.warn("Failed to send response", e);
        }
    }

    public static void parseCommon(AbstractBulkByScrollRequest<?> internalRequest, RestRequest request) {
        internalRequest.setRefresh(request.paramAsBoolean("refresh", internalRequest.isRefresh()));
        internalRequest.setTimeout(request.paramAsTime("timeout", internalRequest.getTimeout()));
        String consistency = request.param("consistency");
        if (consistency != null) {
            internalRequest.setConsistency(WriteConsistencyLevel.fromString(consistency));
        }
    }

    /**
     * Yank a string array from a map. Emulates XContent's permissive String to
     * String array conversions.
     */
    private static String[] extractStringArray(Map<String, Object> source, String name) {
        Object value = source.remove(name);
        if (value == null) {
            return null;
        }
        if (value instanceof List) {
            @SuppressWarnings("unchecked")
            List<String> list = (List<String>) value;
            return list.toArray(new String[list.size()]);
        } else if (value instanceof String) {
            return new String[] {(String) value};
        } else {
            throw new IllegalArgumentException("Expected [" + name + "] to be a list or a string but was [" + value + ']');
        }
    }

    private class ReindexParseContext {
        private final QueryParseContext queryParseContext;
        private final AggregatorParsers aggParsers;

        public ReindexParseContext(QueryParseContext queryParseContext, AggregatorParsers aggParsers) {
            this.queryParseContext = queryParseContext;
            this.aggParsers = aggParsers;
        }
    }
}
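The ObjectParser pattern used throughout the handler above: declare a setter per accepted field once, then feed the parser any XContentParser. A standalone sketch with an invented Destination POJO, kept separate from the real dest parser so nothing here is mistaken for the commit's API surface:

package org.elasticsearch.index.reindex;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ObjectParser;

class DestParserSketch {
    static class Destination {
        String index;
        String routing;
    }

    // One declaration per accepted field; unknown fields fail parsing loudly.
    static final ObjectParser<Destination, Void> PARSER = new ObjectParser<>("dest_sketch");
    static {
        PARSER.declareString((d, s) -> d.index = s, new ParseField("index"));
        PARSER.declareString((d, s) -> d.routing = s, new ParseField("routing"));
    }
}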
@@ -0,0 +1,112 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.search.RestSearchAction;
import org.elasticsearch.rest.action.support.RestActions;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.aggregations.AggregatorParsers;

import java.util.Map;

import static org.elasticsearch.index.reindex.AbstractBulkByScrollRequest.SIZE_ALL_MATCHES;
import static org.elasticsearch.index.reindex.RestReindexAction.parseCommon;
import static org.elasticsearch.rest.RestRequest.Method.POST;

public class RestUpdateByQueryAction extends
        AbstractBaseReindexRestHandler<UpdateByQueryRequest, BulkIndexByScrollResponse, TransportUpdateByQueryAction> {
    @Inject
    public RestUpdateByQueryAction(Settings settings, RestController controller, Client client,
            IndicesQueriesRegistry indicesQueriesRegistry, AggregatorParsers aggParsers, ClusterService clusterService,
            TransportUpdateByQueryAction action) {
        super(settings, client, indicesQueriesRegistry, aggParsers, clusterService, action);
        controller.registerHandler(POST, "/{index}/_update_by_query", this);
        controller.registerHandler(POST, "/{index}/{type}/_update_by_query", this);
    }

    @Override
    protected void handleRequest(RestRequest request, RestChannel channel, Client client) throws Exception {
        /*
         * Passing the search request through UpdateByQueryRequest first allows
         * it to set its own defaults which differ from SearchRequest's
         * defaults. Then the parse can override them.
         */
        UpdateByQueryRequest internalRequest = new UpdateByQueryRequest(new SearchRequest());
        int scrollSize = internalRequest.getSearchRequest().source().size();
        internalRequest.getSearchRequest().source().size(SIZE_ALL_MATCHES);
        /*
         * We can't send parseSearchRequest REST content that it doesn't
         * support, so we first have to strip from the body the parts that are
         * valid here but that it doesn't understand. This is a temporary hack
         * and should get better when SearchRequest has full ObjectParser
         * support; then we can simply delegate.
         */
        BytesReference bodyContent = null;
        if (RestActions.hasBodyContent(request)) {
            bodyContent = RestActions.getRestContent(request);
            Tuple<XContentType, Map<String, Object>> body = XContentHelper.convertToMap(bodyContent, false);
            boolean modified = false;
            String conflicts = (String) body.v2().remove("conflicts");
            if (conflicts != null) {
                internalRequest.setConflicts(conflicts);
                modified = true;
            }
            @SuppressWarnings("unchecked")
            Map<String, Object> script = (Map<String, Object>) body.v2().remove("script");
            if (script != null) {
                internalRequest.setScript(Script.parse(script, false, parseFieldMatcher));
                modified = true;
            }
            if (modified) {
                XContentBuilder builder = XContentFactory.contentBuilder(body.v1());
                builder.map(body.v2());
                bodyContent = builder.bytes();
            }
        }
        RestSearchAction.parseSearchRequest(internalRequest.getSearchRequest(), indicesQueriesRegistry, request,
                parseFieldMatcher, aggParsers, bodyContent);

        String conflicts = request.param("conflicts");
        if (conflicts != null) {
            internalRequest.setConflicts(conflicts);
        }
        parseCommon(internalRequest, request);

        internalRequest.setSize(internalRequest.getSearchRequest().source().size());
        internalRequest.getSearchRequest().source().size(request.paramAsInt("scroll_size", scrollSize));

        execute(request, internalRequest, channel);
    }
}
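The strip-and-rebuild trick above in isolation: decode the body to a map, remove a key parseSearchRequest would reject, and re-encode the remainder in its original content type. The helper class and key name are illustrative only:

package org.elasticsearch.index.reindex;

import java.io.IOException;
import java.util.Map;

import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;

class BodyRewriteSketch {
    static BytesReference stripKey(BytesReference body, String key) throws IOException {
        Tuple<XContentType, Map<String, Object>> parsed = XContentHelper.convertToMap(body, false);
        parsed.v2().remove(key);                                   // pull out the unsupported field
        XContentBuilder builder = XContentFactory.contentBuilder(parsed.v1());
        builder.map(parsed.v2());                                  // re-encode what's left
        return builder.bytes();
    }
}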
@ -0,0 +1,273 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.reindex;
|
||||
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.ActionRequestValidationException;
|
||||
import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
|
||||
import org.elasticsearch.action.index.IndexRequest;
|
||||
import org.elasticsearch.action.search.SearchRequest;
|
||||
import org.elasticsearch.action.search.ShardSearchFailure;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.AutoCreateIndex;
|
||||
import org.elasticsearch.action.support.HandledTransportAction;
|
||||
import org.elasticsearch.client.Client;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.lucene.uid.Versions;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.index.mapper.internal.TTLFieldMapper;
|
||||
import org.elasticsearch.index.mapper.internal.VersionFieldMapper;
|
||||
import org.elasticsearch.script.ScriptService;
|
||||
import org.elasticsearch.search.SearchHit;
|
||||
import org.elasticsearch.tasks.Task;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
|
||||
import static java.util.Objects.requireNonNull;
|
||||
import static org.elasticsearch.index.VersionType.INTERNAL;
|
||||
|
||||
public class TransportReindexAction extends HandledTransportAction<ReindexRequest, ReindexResponse> {
|
||||
private final ClusterService clusterService;
|
||||
private final ScriptService scriptService;
|
||||
private final AutoCreateIndex autoCreateIndex;
|
||||
private final Client client;
|
||||
|
||||
@Inject
|
||||
public TransportReindexAction(Settings settings, ThreadPool threadPool, ActionFilters actionFilters,
|
||||
IndexNameExpressionResolver indexNameExpressionResolver, ClusterService clusterService, ScriptService scriptService,
|
||||
AutoCreateIndex autoCreateIndex, Client client, TransportService transportService) {
|
||||
super(settings, ReindexAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
|
||||
ReindexRequest::new);
|
||||
this.clusterService = clusterService;
|
||||
this.scriptService = scriptService;
|
||||
this.autoCreateIndex = autoCreateIndex;
|
||||
this.client = client;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doExecute(Task task, ReindexRequest request, ActionListener<ReindexResponse> listener) {
|
||||
validateAgainstAliases(request.getSearchRequest(), request.getDestination(), indexNameExpressionResolver, autoCreateIndex,
|
||||
clusterService.state());
|
||||
new AsyncIndexBySearchAction((BulkByScrollTask) task, logger, scriptService, client, threadPool, request, listener).start();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doExecute(ReindexRequest request, ActionListener<ReindexResponse> listener) {
|
||||
throw new UnsupportedOperationException("task required");
|
||||
}
|
||||
|
||||
/**
|
||||
* Throws an ActionRequestValidationException if the request tries to index
|
||||
* back into the same index or into an index that points to two indexes.
|
||||
* This cannot be done during request validation because the cluster state
|
||||
* isn't available then. Package private for testing.
|
||||
*/
|
||||
static String validateAgainstAliases(SearchRequest source, IndexRequest destination,
|
||||
IndexNameExpressionResolver indexNameExpressionResolver, AutoCreateIndex autoCreateIndex, ClusterState clusterState) {
|
||||
String target = destination.index();
|
||||
if (false == autoCreateIndex.shouldAutoCreate(target, clusterState)) {
|
||||
/*
|
||||
* If we're going to autocreate the index we don't need to resolve
|
||||
* it. This is the same sort of dance that TransportIndexRequest
|
||||
* uses to decide to autocreate the index.
|
||||
*/
|
||||
target = indexNameExpressionResolver.concreteIndices(clusterState, destination)[0];
|
||||
}
|
||||
for (String sourceIndex: indexNameExpressionResolver.concreteIndices(clusterState, source)) {
|
||||
if (sourceIndex.equals(target)) {
|
||||
ActionRequestValidationException e = new ActionRequestValidationException();
|
||||
e.addValidationError("reindex cannot write into an index its reading from [" + target + ']');
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
return target;
|
||||
}

    /**
     * Simple implementation of reindex using scrolling and bulk. There are tons
     * of optimizations that can be done on certain types of reindex requests
     * but this makes no attempt to do any of them so it can be as simple as
     * possible.
     */
    static class AsyncIndexBySearchAction extends AbstractAsyncBulkIndexByScrollAction<ReindexRequest, ReindexResponse> {
        public AsyncIndexBySearchAction(BulkByScrollTask task, ESLogger logger, ScriptService scriptService, Client client,
                ThreadPool threadPool, ReindexRequest request, ActionListener<ReindexResponse> listener) {
            super(task, logger, scriptService, client, threadPool, request, request.getSearchRequest(), listener);
        }

        @Override
        protected IndexRequest buildIndexRequest(SearchHit doc) {
            IndexRequest index = new IndexRequest();

            // Copy the index from the request so we always write where it asked to write
            index.index(mainRequest.getDestination().index());

            // If the request overrides the type then the user wants all documents in that type. Otherwise keep the doc's type.
            if (mainRequest.getDestination().type() == null) {
                index.type(doc.type());
            } else {
                index.type(mainRequest.getDestination().type());
            }

            /*
             * Internal versioning can just use what we copied from the destination request. Otherwise we assume we're using external
             * versioning and use the doc's version.
             */
            index.versionType(mainRequest.getDestination().versionType());
            if (index.versionType() == INTERNAL) {
                index.version(mainRequest.getDestination().version());
            } else {
                index.version(doc.version());
            }

            // id and source always come from the found doc. Scripts can change them but they operate on the index request.
            index.id(doc.id());
            index.source(doc.sourceRef());

            /*
             * The rest of the index request just has to be copied from the template. It may be changed later by scripts; from here on
             * out the superclass operates on the index request rather than the template.
             */
            index.routing(mainRequest.getDestination().routing());
            index.parent(mainRequest.getDestination().parent());
            index.timestamp(mainRequest.getDestination().timestamp());
            index.ttl(mainRequest.getDestination().ttl());
            index.contentType(mainRequest.getDestination().getContentType());
            // OpType is synthesized from version so it is handled when we copy version above.

            return index;
        }

        /**
         * Override the simple copy behavior to allow more fine-grained control.
         */
        @Override
        protected void copyRouting(IndexRequest index, SearchHit doc) {
            String routingSpec = mainRequest.getDestination().routing();
            if (routingSpec == null) {
                super.copyRouting(index, doc);
                return;
            }
            if (routingSpec.startsWith("=")) {
                index.routing(mainRequest.getDestination().routing().substring(1));
                return;
            }
            switch (routingSpec) {
            case "keep":
                super.copyRouting(index, doc);
                break;
            case "discard":
                index.routing(null);
                break;
            default:
                throw new IllegalArgumentException("Unsupported routing command");
            }
        }

        @Override
        protected ReindexResponse buildResponse(TimeValue took, List<Failure> indexingFailures, List<ShardSearchFailure> searchFailures) {
            return new ReindexResponse(took, task.getStatus(), indexingFailures, searchFailures);
        }

        /*
         * Methods below here handle script updating the index request. They try
         * to be pretty liberal with regards to types because scripts are often
         * dynamically typed.
         */
        @Override
        protected void scriptChangedIndex(IndexRequest index, Object to) {
            requireNonNull(to, "Can't reindex without a destination index!");
            index.index(to.toString());
        }

        @Override
        protected void scriptChangedType(IndexRequest index, Object to) {
            requireNonNull(to, "Can't reindex without a destination type!");
            index.type(to.toString());
        }

        @Override
        protected void scriptChangedId(IndexRequest index, Object to) {
            index.id(Objects.toString(to, null));
        }

        @Override
        protected void scriptChangedVersion(IndexRequest index, Object to) {
            if (to == null) {
                index.version(Versions.MATCH_ANY).versionType(INTERNAL);
                return;
            }
            index.version(asLong(to, VersionFieldMapper.NAME));
        }

        @Override
        protected void scriptChangedParent(IndexRequest index, Object to) {
            // Have to override routing with parent just in case it's changed
            String routing = Objects.toString(to, null);
            index.parent(routing).routing(routing);
        }

        @Override
        protected void scriptChangedRouting(IndexRequest index, Object to) {
            index.routing(Objects.toString(to, null));
        }

        @Override
        protected void scriptChangedTimestamp(IndexRequest index, Object to) {
            index.timestamp(Objects.toString(to, null));
        }

        @Override
        protected void scriptChangedTTL(IndexRequest index, Object to) {
            if (to == null) {
                index.ttl((TimeValue) null);
                return;
            }
            index.ttl(asLong(to, TTLFieldMapper.NAME));
        }

        private long asLong(Object from, String name) {
            /*
             * Stuffing a number into the map will have converted it to
             * some Number.
             */
            Number fromNumber;
            try {
                fromNumber = (Number) from;
            } catch (ClassCastException e) {
                throw new IllegalArgumentException(name + " may only be set to an int or a long but was [" + from + "]", e);
            }
            long l = fromNumber.longValue();
            // Check that we didn't round when we fetched the value.
            if (fromNumber.doubleValue() != l) {
                throw new IllegalArgumentException(name + " may only be set to an int or a long but was [" + from + "]");
            }
            return l;
        }
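
        // Illustration (hypothetical values, not part of the original change):
        // asLong(10, "_ttl") and asLong(10L, "_ttl") both return 10L, while
        // asLong(10.5, "_ttl") fails the doubleValue() round-trip check above
        // and throws IllegalArgumentException.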
    }
}
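
A hedged usage sketch for the action above, showing how the destination metadata and routing specs are consumed. The ReindexRequest(SearchRequest, IndexRequest) constructor and the ReindexAction singleton are assumed from parts of this pull request not shown here; the "=value", "keep", and "discard" routing specs come straight from copyRouting above.

// Copy everything from "old" into "new", pinning every document to the
// literal routing value "cat" via the "=" spec handled by copyRouting.
SearchRequest source = new SearchRequest("old");
IndexRequest destination = new IndexRequest("new");
destination.routing("=cat"); // or "keep" to copy each doc's routing, or "discard" to drop it
ReindexRequest reindex = new ReindexRequest(source, destination); // assumed constructor
client.execute(ReindexAction.INSTANCE, reindex, new ActionListener<ReindexResponse>() {
    @Override
    public void onResponse(ReindexResponse response) {
        // Inspect response.getUpdated(), response.getIndexingFailures(), ...
    }

    @Override
    public void onFailure(Throwable e) {
        // Validation failures, e.g. writing into the source index, land here.
    }
});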

@@ -0,0 +1,142 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.mapper.internal.IdFieldMapper;
import org.elasticsearch.index.mapper.internal.IndexFieldMapper;
import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
import org.elasticsearch.index.mapper.internal.RoutingFieldMapper;
import org.elasticsearch.index.mapper.internal.TTLFieldMapper;
import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.List;

public class TransportUpdateByQueryAction extends HandledTransportAction<UpdateByQueryRequest, BulkIndexByScrollResponse> {
    private final Client client;
    private final ScriptService scriptService;

    @Inject
    public TransportUpdateByQueryAction(Settings settings, ThreadPool threadPool, ActionFilters actionFilters,
            IndexNameExpressionResolver indexNameExpressionResolver, Client client, TransportService transportService,
            ScriptService scriptService) {
        super(settings, UpdateByQueryAction.NAME, threadPool, transportService, actionFilters,
                indexNameExpressionResolver, UpdateByQueryRequest::new);
        this.client = client;
        this.scriptService = scriptService;
    }

    @Override
    protected void doExecute(Task task, UpdateByQueryRequest request,
            ActionListener<BulkIndexByScrollResponse> listener) {
        new AsyncIndexBySearchAction((BulkByScrollTask) task, logger, scriptService, client, threadPool, request, listener).start();
    }

    @Override
    protected void doExecute(UpdateByQueryRequest request, ActionListener<BulkIndexByScrollResponse> listener) {
        throw new UnsupportedOperationException("task required");
    }

    /**
     * Simple implementation of update-by-query using scrolling and bulk.
     */
    static class AsyncIndexBySearchAction extends AbstractAsyncBulkIndexByScrollAction<UpdateByQueryRequest, BulkIndexByScrollResponse> {
        public AsyncIndexBySearchAction(BulkByScrollTask task, ESLogger logger, ScriptService scriptService, Client client,
                ThreadPool threadPool, UpdateByQueryRequest request, ActionListener<BulkIndexByScrollResponse> listener) {
            super(task, logger, scriptService, client, threadPool, request, request.getSearchRequest(), listener);
        }

        @Override
        protected IndexRequest buildIndexRequest(SearchHit doc) {
            IndexRequest index = new IndexRequest();
            index.index(doc.index());
            index.type(doc.type());
            index.id(doc.id());
            index.source(doc.sourceRef());
            index.versionType(VersionType.INTERNAL);
            index.version(doc.version());
            return index;
        }

        @Override
        protected BulkIndexByScrollResponse buildResponse(TimeValue took, List<Failure> indexingFailures,
                List<ShardSearchFailure> searchFailures) {
            return new BulkIndexByScrollResponse(took, task.getStatus(), indexingFailures, searchFailures);
        }

        @Override
        protected void scriptChangedIndex(IndexRequest index, Object to) {
            throw new IllegalArgumentException("Modifying [" + IndexFieldMapper.NAME + "] not allowed");
        }

        @Override
        protected void scriptChangedType(IndexRequest index, Object to) {
            throw new IllegalArgumentException("Modifying [" + TypeFieldMapper.NAME + "] not allowed");
        }

        @Override
        protected void scriptChangedId(IndexRequest index, Object to) {
            throw new IllegalArgumentException("Modifying [" + IdFieldMapper.NAME + "] not allowed");
        }

        @Override
        protected void scriptChangedVersion(IndexRequest index, Object to) {
            throw new IllegalArgumentException("Modifying [_version] not allowed");
        }

        @Override
        protected void scriptChangedRouting(IndexRequest index, Object to) {
            throw new IllegalArgumentException("Modifying [" + RoutingFieldMapper.NAME + "] not allowed");
        }

        @Override
        protected void scriptChangedParent(IndexRequest index, Object to) {
            throw new IllegalArgumentException("Modifying [" + ParentFieldMapper.NAME + "] not allowed");
        }

        @Override
        protected void scriptChangedTimestamp(IndexRequest index, Object to) {
            throw new IllegalArgumentException("Modifying [" + TimestampFieldMapper.NAME + "] not allowed");
        }

        @Override
        protected void scriptChangedTTL(IndexRequest index, Object to) {
            throw new IllegalArgumentException("Modifying [" + TTLFieldMapper.NAME + "] not allowed");
        }
    }
}

@@ -0,0 +1,43 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.Action;
import org.elasticsearch.client.ElasticsearchClient;

public class UpdateByQueryAction extends
        Action<UpdateByQueryRequest, BulkIndexByScrollResponse, UpdateByQueryRequestBuilder> {
    public static final UpdateByQueryAction INSTANCE = new UpdateByQueryAction();
    public static final String NAME = "indices:data/write/update/byquery";

    private UpdateByQueryAction() {
        super(NAME);
    }

    @Override
    public UpdateByQueryRequestBuilder newRequestBuilder(ElasticsearchClient client) {
        return new UpdateByQueryRequestBuilder(client, this);
    }

    @Override
    public BulkIndexByScrollResponse newResponse() {
        return new BulkIndexByScrollResponse();
    }
}

@@ -0,0 +1,48 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.search.SearchRequest;

/**
 * Request to reindex a set of documents in place, without changing their
 * locations or IDs.
 */
public class UpdateByQueryRequest extends AbstractBulkIndexByScrollRequest<UpdateByQueryRequest> {
    public UpdateByQueryRequest() {
    }

    public UpdateByQueryRequest(SearchRequest search) {
        super(search);
    }

    @Override
    protected UpdateByQueryRequest self() {
        return this;
    }

    @Override
    public String toString() {
        StringBuilder b = new StringBuilder();
        b.append("update-by-query ");
        searchToString(b);
        return b.toString();
    }
}

@@ -0,0 +1,51 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.Action;
import org.elasticsearch.action.search.SearchAction;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;

public class UpdateByQueryRequestBuilder extends
        AbstractBulkIndexByScrollRequestBuilder<UpdateByQueryRequest, BulkIndexByScrollResponse, UpdateByQueryRequestBuilder> {

    public UpdateByQueryRequestBuilder(ElasticsearchClient client,
            Action<UpdateByQueryRequest, BulkIndexByScrollResponse, UpdateByQueryRequestBuilder> action) {
        this(client, action, new SearchRequestBuilder(client, SearchAction.INSTANCE));
    }

    private UpdateByQueryRequestBuilder(ElasticsearchClient client,
            Action<UpdateByQueryRequest, BulkIndexByScrollResponse, UpdateByQueryRequestBuilder> action,
            SearchRequestBuilder search) {
        super(client, action, search, new UpdateByQueryRequest(search.request()));
    }

    @Override
    protected UpdateByQueryRequestBuilder self() {
        return this;
    }

    @Override
    public UpdateByQueryRequestBuilder abortOnVersionConflict(boolean abortOnVersionConflict) {
        request.setAbortOnVersionConflict(abortOnVersionConflict);
        return this;
    }
}
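
A minimal sketch of wiring the pieces above together, using only API visible in this diff (the UpdateByQueryRequest constructor, UpdateByQueryAction.INSTANCE, and setAbortOnVersionConflict); the index name "twitter" is hypothetical.

UpdateByQueryRequest request = new UpdateByQueryRequest(new SearchRequest("twitter"));
request.setAbortOnVersionConflict(false); // keep going when documents change mid-update
client.execute(UpdateByQueryAction.INSTANCE, request, new ActionListener<BulkIndexByScrollResponse>() {
    @Override
    public void onResponse(BulkIndexByScrollResponse response) {
        // Inspect response.getUpdated(), response.getVersionConflicts(), ...
    }

    @Override
    public void onFailure(Throwable e) {
        // Transport-level failures land here.
    }
});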

@@ -0,0 +1,68 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.Index;
import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.search.SearchHitField;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.internal.InternalSearchHit;

import java.util.HashMap;
import java.util.Map;
import java.util.function.Consumer;

import static java.util.Collections.singletonMap;
import static org.hamcrest.Matchers.equalTo;

public abstract class AbstractAsyncBulkIndexByScrollActionScriptTestCase<
        Request extends AbstractBulkIndexByScrollRequest<Request>,
        Response extends BulkIndexByScrollResponse>
        extends AbstractAsyncBulkIndexByScrollActionTestCase<Request, Response> {
    protected IndexRequest applyScript(Consumer<Map<String, Object>> scriptBody) {
        IndexRequest index = new IndexRequest("index", "type", "1").source(singletonMap("foo", "bar"));
        Map<String, SearchHitField> fields = new HashMap<>();
        InternalSearchHit doc = new InternalSearchHit(0, "id", new Text("type"), fields);
        doc.shardTarget(new SearchShardTarget("nodeid", new Index("index", "uuid"), 1));
        ExecutableScript script = new SimpleExecutableScript(scriptBody);
        action().applyScript(index, doc, script, new HashMap<>());
        return index;
    }

    public void testScriptAddingJunkToCtxIsError() {
        try {
            applyScript((Map<String, Object> ctx) -> ctx.put("junk", "junk"));
            fail("Expected error");
        } catch (IllegalArgumentException e) {
            assertThat(e.getMessage(), equalTo("Invalid fields added to ctx [junk]"));
        }
    }

    public void testChangeSource() {
        IndexRequest index = applyScript((Map<String, Object> ctx) -> {
            @SuppressWarnings("unchecked")
            Map<String, Object> source = (Map<String, Object>) ctx.get("_source");
            source.put("bar", "cat");
        });
        assertEquals("cat", index.sourceAsMap().get("bar"));
    }
}

@@ -0,0 +1,55 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.ThreadPool;
import org.junit.After;
import org.junit.Before;

public abstract class AbstractAsyncBulkIndexByScrollActionTestCase<
        Request extends AbstractBulkIndexByScrollRequest<Request>,
        Response extends BulkIndexByScrollResponse>
        extends ESTestCase {
    protected ThreadPool threadPool;
    protected BulkByScrollTask task;

    @Before
    public void setupForTest() {
        threadPool = new ThreadPool(getTestName());
        task = new BulkByScrollTask(1, "test", "test", "test");
    }

    @After
    @Override
    public void tearDown() throws Exception {
        super.tearDown();
        threadPool.shutdown();
    }

    protected abstract AbstractAsyncBulkIndexByScrollAction<Request, Response> action();

    protected abstract Request request();

    protected PlainActionFuture<Response> listener() {
        return new PlainActionFuture<>();
    }
}

@@ -0,0 +1,61 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.mapper.internal.TTLFieldMapper;
import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.internal.InternalSearchHit;
import org.elasticsearch.search.internal.InternalSearchHitField;

import static java.util.Collections.singletonList;
import static java.util.Collections.singletonMap;
import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;

public abstract class AbstractAsyncBulkIndexbyScrollActionMetadataTestCase<
        Request extends AbstractBulkIndexByScrollRequest<Request>,
        Response extends BulkIndexByScrollResponse>
        extends AbstractAsyncBulkIndexByScrollActionTestCase<Request, Response> {

    /**
     * Create a doc with some metadata.
     */
    protected InternalSearchHit doc(String field, Object value) {
        InternalSearchHit doc = new InternalSearchHit(0, "id", new Text("type"), singletonMap(field,
                new InternalSearchHitField(field, singletonList(value))));
        doc.shardTarget(new SearchShardTarget("node", new Index("index", "uuid"), 0));
        return doc;
    }

    public void testTimestampIsCopied() {
        IndexRequest index = new IndexRequest();
        action().copyMetadata(index, doc(TimestampFieldMapper.NAME, 10L));
        assertEquals("10", index.timestamp());
    }

    public void testTTL() throws Exception {
        IndexRequest index = new IndexRequest();
        action().copyMetadata(index, doc(TTLFieldMapper.NAME, 10L));
        assertEquals(timeValueMillis(10), index.ttl());
    }
}

@@ -0,0 +1,122 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.hamcrest.Description;
import org.hamcrest.Matcher;
import org.hamcrest.TypeSafeMatcher;

import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.nullValue;

public abstract class AbstractBulkIndexByScrollResponseMatcher<
        Response extends BulkIndexByScrollResponse,
        Self extends AbstractBulkIndexByScrollResponseMatcher<Response, Self>>
        extends TypeSafeMatcher<Response> {
    private Matcher<Long> updatedMatcher = equalTo(0L);
    /**
     * Matcher for the number of batches. Optional.
     */
    private Matcher<Integer> batchesMatcher;
    private Matcher<Long> versionConflictsMatcher = equalTo(0L);
    private Matcher<Integer> failuresMatcher = equalTo(0);
    private Matcher<String> reasonCancelledMatcher = nullValue(String.class);

    protected abstract Self self();

    public Self updated(Matcher<Long> updatedMatcher) {
        this.updatedMatcher = updatedMatcher;
        return self();
    }

    public Self updated(long updated) {
        return updated(equalTo(updated));
    }

    /**
     * Set the matcher for the number of batches. Defaults to matching any
     * integer because we usually don't care how many batches the job takes.
     */
    public Self batches(Matcher<Integer> batchesMatcher) {
        this.batchesMatcher = batchesMatcher;
        return self();
    }

    public Self batches(int batches) {
        return batches(equalTo(batches));
    }

    public Self batches(int total, int batchSize) {
        // Round up
        return batches((total + batchSize - 1) / batchSize);
    }
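
    // Illustration (not part of the original change): batches(10, 3) expects
    // ceil(10 / 3) = 4 batches, because (total + batchSize - 1) / batchSize
    // rounds up in integer arithmetic: (10 + 3 - 1) / 3 = 12 / 3 = 4.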

    public Self versionConflicts(Matcher<Long> versionConflictsMatcher) {
        this.versionConflictsMatcher = versionConflictsMatcher;
        return self();
    }

    public Self versionConflicts(long versionConflicts) {
        return versionConflicts(equalTo(versionConflicts));
    }

    /**
     * Set the matcher for the size of the failures list. For more in-depth
     * matching, do it by hand. The type signatures required to match the
     * actual failures list here just don't work.
     */
    public Self failures(Matcher<Integer> failuresMatcher) {
        this.failuresMatcher = failuresMatcher;
        return self();
    }

    /**
     * Set the expected size of the failures list.
     */
    public Self failures(int failures) {
        return failures(equalTo(failures));
    }

    public Self reasonCancelled(Matcher<String> reasonCancelledMatcher) {
        this.reasonCancelledMatcher = reasonCancelledMatcher;
        return self();
    }

    @Override
    protected boolean matchesSafely(Response item) {
        return updatedMatcher.matches(item.getUpdated()) &&
            (batchesMatcher == null || batchesMatcher.matches(item.getBatches())) &&
            versionConflictsMatcher.matches(item.getVersionConflicts()) &&
            failuresMatcher.matches(item.getIndexingFailures().size()) &&
            reasonCancelledMatcher.matches(item.getReasonCancelled());
    }

    @Override
    public void describeTo(Description description) {
        description.appendText("indexed matches ").appendDescriptionOf(updatedMatcher);
        if (batchesMatcher != null) {
            description.appendText(" and batches matches ").appendDescriptionOf(batchesMatcher);
        }
        description.appendText(" and versionConflicts matches ").appendDescriptionOf(versionConflictsMatcher);
        description.appendText(" and failures size matches ").appendDescriptionOf(failuresMatcher);
        description.appendText(" and reason cancelled matches ").appendDescriptionOf(reasonCancelledMatcher);
    }
}
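
Subclasses supply the concrete matcher type; a hedged sketch of the intended fluent use in a test, where the matcher() factory and response are hypothetical stand-ins:

// 10 documents copied in batches of 3 -> 4 batches, no conflicts or failures.
assertThat(response, matcher().updated(10).batches(10, 3).versionConflicts(0).failures(0));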

@@ -0,0 +1,511 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.ClearScrollResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.FilterClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.internal.InternalSearchHit;
import org.elasticsearch.search.internal.InternalSearchHits;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.client.NoOpClient;
import org.elasticsearch.threadpool.ThreadPool;
import org.junit.After;
import org.junit.Before;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;

import static java.util.Collections.emptyList;
import static java.util.Collections.emptyMap;
import static org.apache.lucene.util.TestUtil.randomSimpleString;
import static org.elasticsearch.action.bulk.BackoffPolicy.constantBackoff;
import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.emptyCollectionOf;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.instanceOf;

public class AsyncBulkByScrollActionTests extends ESTestCase {
    private MyMockClient client;
    private ThreadPool threadPool;
    private DummyAbstractBulkByScrollRequest mainRequest;
    private SearchRequest firstSearchRequest;
    private PlainActionFuture<BulkIndexByScrollResponse> listener;
    private String scrollId;
    private TaskManager taskManager;
    private BulkByScrollTask task;

    @Before
    public void setupForTest() {
        client = new MyMockClient(new NoOpClient(getTestName()));
        threadPool = new ThreadPool(getTestName());
        mainRequest = new DummyAbstractBulkByScrollRequest();
        firstSearchRequest = null;
        listener = new PlainActionFuture<>();
        scrollId = null;
        taskManager = new TaskManager(Settings.EMPTY);
        task = (BulkByScrollTask) taskManager.register("don'tcare", "hereeither", mainRequest);
    }

    @After
    public void tearDownAndVerifyCommonStuff() {
        client.close();
        threadPool.shutdown();
    }

    /**
     * Generates a random scrollId and registers it so that when the test
     * finishes we check that it was cleared. Subsequent calls reregister a new
     * random scroll id so it is checked instead.
     */
    private String scrollId() {
        scrollId = randomSimpleString(random(), 1, 1000); // Empty strings get special behavior we don't want
        return scrollId;
    }

    public void testScrollResponseSetsTotal() {
        // Default is 0, meaning unstarted
        assertEquals(0, task.getStatus().getTotal());

        long total = randomIntBetween(0, Integer.MAX_VALUE);
        InternalSearchHits hits = new InternalSearchHits(null, total, 0);
        InternalSearchResponse searchResponse = new InternalSearchResponse(hits, null, null, null, false, false);
        new DummyAbstractAsyncBulkByScrollAction()
                .onScrollResponse(new SearchResponse(searchResponse, scrollId(), 5, 4, randomLong(), null));
        assertEquals(total, task.getStatus().getTotal());
    }

    public void testEachScrollResponseIsABatch() {
        // Replace the generic thread pool with one that executes immediately so the batch is updated immediately
        threadPool.shutdown();
        threadPool = new ThreadPool(getTestName()) {
            @Override
            public Executor generic() {
                return new Executor() {
                    @Override
                    public void execute(Runnable command) {
                        command.run();
                    }
                };
            }
        };
        int maxBatches = randomIntBetween(0, 100);
        for (int batches = 1; batches < maxBatches; batches++) {
            InternalSearchHit hit = new InternalSearchHit(0, "id", new Text("type"), emptyMap());
            InternalSearchHits hits = new InternalSearchHits(new InternalSearchHit[] { hit }, 0, 0);
            InternalSearchResponse searchResponse = new InternalSearchResponse(hits, null, null, null, false, false);
            new DummyAbstractAsyncBulkByScrollAction()
                    .onScrollResponse(new SearchResponse(searchResponse, scrollId(), 5, 4, randomLong(), null));

            assertEquals(batches, task.getStatus().getBatches());
        }
    }

    public void testBulkResponseSetsLotsOfStatus() {
        mainRequest.setAbortOnVersionConflict(false);
        int maxBatches = randomIntBetween(0, 100);
        long versionConflicts = 0;
        long created = 0;
        long updated = 0;
        long deleted = 0;
        for (int batches = 0; batches < maxBatches; batches++) {
            BulkItemResponse[] responses = new BulkItemResponse[randomIntBetween(0, 100)];
            for (int i = 0; i < responses.length; i++) {
                ShardId shardId = new ShardId(new Index("name", "uid"), 0);
                String opType;
                if (rarely()) {
                    opType = randomSimpleString(random());
                    versionConflicts++;
                    responses[i] = new BulkItemResponse(i, opType, new Failure(shardId.getIndexName(), "type", "id" + i,
                            new VersionConflictEngineException(shardId, "type", "id", "test")));
                    continue;
                }
                boolean createdResponse;
                switch (randomIntBetween(0, 2)) {
                case 0:
                    opType = randomFrom("index", "create");
                    createdResponse = true;
                    created++;
                    break;
                case 1:
                    opType = randomFrom("index", "create");
                    createdResponse = false;
                    updated++;
                    break;
                case 2:
                    opType = "delete";
                    createdResponse = false;
                    deleted++;
                    break;
                default:
                    throw new RuntimeException("Bad scenario");
                }
                responses[i] = new BulkItemResponse(i, opType, new IndexResponse(shardId, "type", "id" + i, randomInt(), createdResponse));
            }
            new DummyAbstractAsyncBulkByScrollAction().onBulkResponse(new BulkResponse(responses, 0));
            assertEquals(versionConflicts, task.getStatus().getVersionConflicts());
            assertEquals(updated, task.getStatus().getUpdated());
            assertEquals(created, task.getStatus().getCreated());
            assertEquals(deleted, task.getStatus().getDeleted());
            assertEquals(versionConflicts, task.getStatus().getVersionConflicts());
        }
    }

    /**
     * Mimics a ThreadPool rejecting execution of the task.
     */
    public void testThreadPoolRejectionsAbortRequest() throws Exception {
        threadPool.shutdown();
        threadPool = new ThreadPool(getTestName()) {
            @Override
            public Executor generic() {
                return new Executor() {
                    @Override
                    public void execute(Runnable command) {
                        ((AbstractRunnable) command).onRejection(new EsRejectedExecutionException("test"));
                    }
                };
            }
        };
        InternalSearchHits hits = new InternalSearchHits(null, 0, 0);
        InternalSearchResponse searchResponse = new InternalSearchResponse(hits, null, null, null, false, false);
        new DummyAbstractAsyncBulkByScrollAction()
                .onScrollResponse(new SearchResponse(searchResponse, scrollId(), 5, 4, randomLong(), null));
        try {
            listener.get();
            fail("Expected a failure");
        } catch (ExecutionException e) {
            assertThat(e.getMessage(), equalTo("EsRejectedExecutionException[test]"));
        }
        assertThat(client.scrollsCleared, contains(scrollId));
    }

    /**
     * Mimics shard search failures usually caused by the data node serving the
     * scroll request going down.
     */
    public void testShardFailuresAbortRequest() throws Exception {
        ShardSearchFailure shardFailure = new ShardSearchFailure(new RuntimeException("test"));
        new DummyAbstractAsyncBulkByScrollAction()
                .onScrollResponse(new SearchResponse(null, scrollId(), 5, 4, randomLong(), new ShardSearchFailure[] { shardFailure }));
        BulkIndexByScrollResponse response = listener.get();
        assertThat(response.getIndexingFailures(), emptyCollectionOf(Failure.class));
        assertThat(response.getSearchFailures(), contains(shardFailure));
        assertNull(response.getReasonCancelled());
        assertThat(client.scrollsCleared, contains(scrollId));
    }

    /**
     * Mimics bulk indexing failures.
     */
    public void testBulkFailuresAbortRequest() throws Exception {
        Failure failure = new Failure("index", "type", "id", new RuntimeException("test"));
        DummyAbstractAsyncBulkByScrollAction action = new DummyAbstractAsyncBulkByScrollAction();
        action.onBulkResponse(new BulkResponse(new BulkItemResponse[] {new BulkItemResponse(0, "index", failure)}, randomLong()));
        BulkIndexByScrollResponse response = listener.get();
        assertThat(response.getIndexingFailures(), contains(failure));
        assertThat(response.getSearchFailures(), emptyCollectionOf(ShardSearchFailure.class));
        assertNull(response.getReasonCancelled());
    }

    /**
     * Mimics script failures or general wrongness by implementers.
     */
    public void testListenerReceiveBuildBulkExceptions() throws Exception {
        DummyAbstractAsyncBulkByScrollAction action = new DummyAbstractAsyncBulkByScrollAction() {
            @Override
            protected BulkRequest buildBulk(Iterable<SearchHit> docs) {
                throw new RuntimeException("surprise");
            }
        };
        InternalSearchHit hit = new InternalSearchHit(0, "id", new Text("type"), emptyMap());
        InternalSearchHits hits = new InternalSearchHits(new InternalSearchHit[] {hit}, 0, 0);
        InternalSearchResponse searchResponse = new InternalSearchResponse(hits, null, null, null, false, false);
        action.onScrollResponse(new SearchResponse(searchResponse, scrollId(), 5, 4, randomLong(), null));
        try {
            listener.get();
            fail("Expected failure.");
        } catch (ExecutionException e) {
            assertThat(e.getCause(), instanceOf(RuntimeException.class));
            assertThat(e.getCause().getMessage(), equalTo("surprise"));
        }
    }

    /**
     * Mimics bulk rejections. These should be retried and eventually succeed.
     */
    public void testBulkRejectionsRetryWithEnoughRetries() throws Exception {
        int bulksToTry = randomIntBetween(1, 10);
        long retryAttempts = 0;
        for (int i = 0; i < bulksToTry; i++) {
            retryAttempts += retryTestCase(false);
            assertEquals(retryAttempts, task.getStatus().getRetries());
        }
    }

    /**
     * Mimics bulk rejections. These should be retried but we fail anyway because we run out of retries.
     */
    public void testBulkRejectionsRetryAndFailAnyway() throws Exception {
        long retryAttempts = retryTestCase(true);
        assertEquals(retryAttempts, task.getStatus().getRetries());
    }

    private long retryTestCase(boolean failWithRejection) throws Exception {
        int totalFailures = randomIntBetween(1, mainRequest.getMaxRetries());
        int size = randomIntBetween(1, 100);
        int retryAttempts = totalFailures - (failWithRejection ? 1 : 0);

        client.bulksToReject = client.bulksAttempts.get() + totalFailures;
        /*
         * When we get a successful bulk response we usually start the next scroll request but let's just intercept that so we don't
         * have to deal with it. We just wait for it to happen.
         */
        CountDownLatch successLatch = new CountDownLatch(1);
        DummyAbstractAsyncBulkByScrollAction action = new DummyAbstractAsyncBulkByScrollAction() {
            @Override
            BackoffPolicy backoffPolicy() {
                // Force a backoff time of 0 to prevent sleeping
                return constantBackoff(timeValueMillis(0), retryAttempts);
            }

            @Override
            void startNextScroll() {
                successLatch.countDown();
            }
        };
        BulkRequest request = new BulkRequest();
        for (int i = 0; i < size + 1; i++) {
            request.add(new IndexRequest("index", "type", "id" + i));
        }
        action.sendBulkRequest(request);
        if (failWithRejection) {
            BulkIndexByScrollResponse response = listener.get();
            assertThat(response.getIndexingFailures(), hasSize(1));
            assertEquals(response.getIndexingFailures().get(0).getStatus(), RestStatus.TOO_MANY_REQUESTS);
            assertThat(response.getSearchFailures(), emptyCollectionOf(ShardSearchFailure.class));
            assertNull(response.getReasonCancelled());
        } else {
            successLatch.await(10, TimeUnit.SECONDS);
        }
        return retryAttempts;
    }

    /**
     * The default retry time matches what we say it is in the javadoc for the request.
     */
    public void testDefaultRetryTimes() {
        Iterator<TimeValue> policy = new DummyAbstractAsyncBulkByScrollAction().backoffPolicy().iterator();
        long millis = 0;
        while (policy.hasNext()) {
            millis += policy.next().millis();
        }
        /*
         * This is the total number of milliseconds that a reindex made with the default settings will back off before attempting one
         * final time. If that request is rejected then the whole process fails with a rejected exception.
         */
        int defaultBackoffBeforeFailing = 59460;
        assertEquals(defaultBackoffBeforeFailing, millis);
    }

    public void testCancelBeforeInitialSearch() throws Exception {
        cancelTaskCase((DummyAbstractAsyncBulkByScrollAction action) -> action.initialSearch());
    }

    public void testCancelBeforeScrollResponse() throws Exception {
        // We bail so early we don't need to pass in a half way valid response.
        cancelTaskCase((DummyAbstractAsyncBulkByScrollAction action) -> action.onScrollResponse(null));
    }

    public void testCancelBeforeSendBulkRequest() throws Exception {
        // We bail so early we don't need to pass in a half way valid request.
        cancelTaskCase((DummyAbstractAsyncBulkByScrollAction action) -> action.sendBulkRequest(null));
    }

    public void testCancelBeforeOnBulkResponse() throws Exception {
        // We bail so early we don't need to pass in a half way valid response.
        cancelTaskCase((DummyAbstractAsyncBulkByScrollAction action) -> action.onBulkResponse(null));
    }

    public void testCancelBeforeStartNextScroll() throws Exception {
        cancelTaskCase((DummyAbstractAsyncBulkByScrollAction action) -> action.startNextScroll());
    }

    public void testCancelBeforeStartNormalTermination() throws Exception {
        // Refresh or not doesn't matter - we don't try to refresh.
        mainRequest.setRefresh(usually());
        cancelTaskCase((DummyAbstractAsyncBulkByScrollAction action) -> action.startNormalTermination(emptyList(), emptyList()));
        // This wouldn't return if we called refresh - the action would hang waiting for the refresh that we haven't mocked.
    }

    private void cancelTaskCase(Consumer<DummyAbstractAsyncBulkByScrollAction> testMe) throws Exception {
        DummyAbstractAsyncBulkByScrollAction action = new DummyAbstractAsyncBulkByScrollAction();
        boolean previousScrollSet = usually();
        if (previousScrollSet) {
            action.setScroll(scrollId());
        }
        String reason = randomSimpleString(random());
        taskManager.cancel(task, reason, (Set<String> s) -> {});
        testMe.accept(action);
        assertEquals(reason, listener.get().getReasonCancelled());
        if (previousScrollSet) {
            // Canceled tasks always start to clear the scroll before they die.
            assertThat(client.scrollsCleared, contains(scrollId));
        }
    }

    private class DummyAbstractAsyncBulkByScrollAction
            extends AbstractAsyncBulkByScrollAction<DummyAbstractBulkByScrollRequest, BulkIndexByScrollResponse> {
        public DummyAbstractAsyncBulkByScrollAction() {
            super(AsyncBulkByScrollActionTests.this.task, logger, client, threadPool,
                    AsyncBulkByScrollActionTests.this.mainRequest, firstSearchRequest, listener);
        }

        @Override
        protected BulkRequest buildBulk(Iterable<SearchHit> docs) {
            return new BulkRequest();
        }

        @Override
        protected BulkIndexByScrollResponse buildResponse(TimeValue took, List<Failure> indexingFailures,
                List<ShardSearchFailure> searchFailures) {
            return new BulkIndexByScrollResponse(took, task.getStatus(), indexingFailures, searchFailures);
        }
    }

    private static class DummyAbstractBulkByScrollRequest extends AbstractBulkByScrollRequest<DummyAbstractBulkByScrollRequest> {
        @Override
        protected DummyAbstractBulkByScrollRequest self() {
            return this;
        }
    }

    private static class MyMockClient extends FilterClient {
        private final List<String> scrollsCleared = new ArrayList<>();
        private final AtomicInteger bulksAttempts = new AtomicInteger();

        private int bulksToReject = 0;

        public MyMockClient(Client in) {
            super(in);
        }

        @Override
        @SuppressWarnings("unchecked")
        protected <Request extends ActionRequest<Request>, Response extends ActionResponse,
                RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(
                Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
            if (request instanceof ClearScrollRequest) {
                ClearScrollRequest clearScroll = (ClearScrollRequest) request;
                scrollsCleared.addAll(clearScroll.getScrollIds());
                listener.onResponse((Response) new ClearScrollResponse(true, clearScroll.getScrollIds().size()));
                return;
            }
            if (request instanceof BulkRequest) {
                BulkRequest bulk = (BulkRequest) request;
                int toReject;
                if (bulksAttempts.incrementAndGet() > bulksToReject) {
                    toReject = -1;
                } else {
                    toReject = randomIntBetween(0, bulk.requests().size() - 1);
                }
                BulkItemResponse[] responses = new BulkItemResponse[bulk.requests().size()];
                for (int i = 0; i < bulk.requests().size(); i++) {
                    ActionRequest<?> item = bulk.requests().get(i);
                    String opType;
                    DocWriteResponse response;
                    ShardId shardId = new ShardId(new Index(((ReplicationRequest<?>) item).index(), "uuid"), 0);
                    if (item instanceof IndexRequest) {
                        IndexRequest index = (IndexRequest) item;
                        opType = index.opType().lowercase();
                        response = new IndexResponse(shardId, index.type(), index.id(), randomIntBetween(0, Integer.MAX_VALUE),
                                true);
                    } else if (item instanceof UpdateRequest) {
                        UpdateRequest update = (UpdateRequest) item;
                        opType = "update";
                        response = new UpdateResponse(shardId, update.type(), update.id(),
                                randomIntBetween(0, Integer.MAX_VALUE), true);
                    } else if (item instanceof DeleteRequest) {
                        DeleteRequest delete = (DeleteRequest) item;
                        opType = "delete";
                        response = new DeleteResponse(shardId, delete.type(), delete.id(), randomIntBetween(0, Integer.MAX_VALUE),
                                true);
                    } else {
                        throw new RuntimeException("Unknown request: " + item);
                    }
                    if (i == toReject) {
                        responses[i] = new BulkItemResponse(i, opType,
                                new Failure(response.getIndex(), response.getType(), response.getId(), new EsRejectedExecutionException()));
                    } else {
                        responses[i] = new BulkItemResponse(i, opType, response);
                    }
                }
                listener.onResponse((Response) new BulkResponse(responses, 1));
                return;
            }
            super.doExecute(action, request, listener);
        }
    }
}

@@ -0,0 +1,113 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.test.ESTestCase;
import org.junit.Before;

public class BulkByScrollTaskTests extends ESTestCase {
    private BulkByScrollTask task;

    @Before
    public void createTask() {
        task = new BulkByScrollTask(1, "test_type", "test_action", "test");
    }

    public void testBasicData() {
        assertEquals(1, task.getId());
        assertEquals("test_type", task.getType());
        assertEquals("test_action", task.getAction());
    }

    public void testProgress() {
        long created = 0;
        long updated = 0;
        long deleted = 0;
        long versionConflicts = 0;
        long noops = 0;
        int batch = 0;
        BulkByScrollTask.Status status = task.getStatus();
        assertEquals(0, status.getTotal());
        assertEquals(created, status.getCreated());
        assertEquals(updated, status.getUpdated());
        assertEquals(deleted, status.getDeleted());
        assertEquals(versionConflicts, status.getVersionConflicts());
        assertEquals(batch, status.getBatches());
        assertEquals(noops, status.getNoops());

        long totalHits = randomIntBetween(10, 1000);
        task.setTotal(totalHits);
        for (long p = 0; p < totalHits; p++) {
            status = task.getStatus();
            assertEquals(totalHits, status.getTotal());
            assertEquals(created, status.getCreated());
            assertEquals(updated, status.getUpdated());
            assertEquals(deleted, status.getDeleted());
            assertEquals(versionConflicts, status.getVersionConflicts());
            assertEquals(batch, status.getBatches());
            assertEquals(noops, status.getNoops());

            if (randomBoolean()) {
                created++;
                task.countCreated();
            } else if (randomBoolean()) {
                updated++;
                task.countUpdated();
            } else {
                deleted++;
                task.countDeleted();
            }

            if (rarely()) {
                versionConflicts++;
                task.countVersionConflict();
            }

            if (rarely()) {
                batch++;
                task.countBatch();
            }

            if (rarely()) {
                noops++;
                task.countNoop();
            }
        }
        status = task.getStatus();
        assertEquals(totalHits, status.getTotal());
        assertEquals(created, status.getCreated());
        assertEquals(updated, status.getUpdated());
        assertEquals(deleted, status.getDeleted());
        assertEquals(versionConflicts, status.getVersionConflicts());
        assertEquals(batch, status.getBatches());
        assertEquals(noops, status.getNoops());
    }

    public void testStatusHatesNegatives() {
        expectThrows(IllegalArgumentException.class, () -> new BulkByScrollTask.Status(-1, 0, 0, 0, 0, 0, 0, 0, null));
        expectThrows(IllegalArgumentException.class, () -> new BulkByScrollTask.Status(0, -1, 0, 0, 0, 0, 0, 0, null));
        expectThrows(IllegalArgumentException.class, () -> new BulkByScrollTask.Status(0, 0, -1, 0, 0, 0, 0, 0, null));
        expectThrows(IllegalArgumentException.class, () -> new BulkByScrollTask.Status(0, 0, 0, -1, 0, 0, 0, 0, null));
        expectThrows(IllegalArgumentException.class, () -> new BulkByScrollTask.Status(0, 0, 0, 0, -1, 0, 0, 0, null));
        expectThrows(IllegalArgumentException.class, () -> new BulkByScrollTask.Status(0, 0, 0, 0, 0, -1, 0, 0, null));
        expectThrows(IllegalArgumentException.class, () -> new BulkByScrollTask.Status(0, 0, 0, 0, 0, 0, -1, 0, null));
        expectThrows(IllegalArgumentException.class, () -> new BulkByScrollTask.Status(0, 0, 0, 0, 0, 0, 0, -1, null));
    }
}
|
|
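The pattern this test pins down: the task bumps counters as the scroll makes progress and hands out point-in-time Status snapshots on demand. A minimal standalone sketch of that shape, with hypothetical names and plain AtomicLong counters (not the plugin's actual implementation, which we only see through its API here):

    import java.util.concurrent.atomic.AtomicLong;

    // Sketch: mutable thread-safe counters, immutable snapshots on demand.
    class ScrollProgress {
        private final AtomicLong total = new AtomicLong();
        private final AtomicLong created = new AtomicLong();
        private final AtomicLong versionConflicts = new AtomicLong();

        void setTotal(long t) { total.set(t); }
        void countCreated() { created.incrementAndGet(); }
        void countVersionConflict() { versionConflicts.incrementAndGet(); }

        // Each call captures the counters at one moment for status reporting.
        Snapshot status() {
            return new Snapshot(total.get(), created.get(), versionConflicts.get());
        }

        static final class Snapshot {
            final long total, created, versionConflicts;
            Snapshot(long total, long created, long versionConflicts) {
                this.total = total;
                this.created = created;
                this.versionConflicts = versionConflicts;
            }
        }
    }

The immutable snapshot is what lets the test loop call getStatus() on every iteration without racing the counter updates.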
@@ -0,0 +1,146 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.ListenableActionFuture;
import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskInfo;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.script.NativeScriptFactory;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptModule;
import org.elasticsearch.script.ScriptService.ScriptType;
import org.elasticsearch.test.ESIntegTestCase;

import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

import static java.util.Collections.emptyMap;
import static org.elasticsearch.test.ESIntegTestCase.client;
import static org.hamcrest.Matchers.hasSize;
import static org.junit.Assert.assertThat;

/**
 * Utilities for testing reindex and update-by-query cancellation. This whole class isn't thread safe. Luckily we run our tests in
 * separate JVMs.
 */
public class CancelTestUtils {
    public static Collection<Class<? extends Plugin>> nodePlugins() {
        return Arrays.asList(ReindexPlugin.class, StickyScriptPlugin.class);
    }

    private static final CyclicBarrier barrier = new CyclicBarrier(2);

    public static <Request extends AbstractBulkIndexByScrollRequest<Request>,
            Response extends ActionResponse,
            Builder extends AbstractBulkIndexByScrollRequestBuilder<Request, Response, Builder>>
            Response testCancel(ESIntegTestCase test, Builder request, String actionToCancel) throws Exception {

        test.indexRandom(true, client().prepareIndex("source", "test", "1").setSource("foo", "a"),
                client().prepareIndex("source", "test", "2").setSource("foo", "a"));

        request.source("source").script(new Script("sticky", ScriptType.INLINE, "native", emptyMap()));
        request.source().setSize(1);
        ListenableActionFuture<Response> response = request.execute();

        // Wait until the script is on the first document.
        barrier.await(30, TimeUnit.SECONDS);

        // Let just one document through.
        barrier.await(30, TimeUnit.SECONDS);

        // Wait until the script is on the second document.
        barrier.await(30, TimeUnit.SECONDS);

        // Cancel the request while the script is running. This will prevent the request from being sent at all.
        List<TaskInfo> cancelledTasks = client().admin().cluster().prepareCancelTasks().setActions(actionToCancel).get().getTasks();
        assertThat(cancelledTasks, hasSize(1));

        // Now let the next document through. It won't be sent because the request is cancelled but we need to unblock the script.
        barrier.await();

        // Now we can just wait on the request and make sure it was actually cancelled halfway through.
        return response.get();
    }

    public static class StickyScriptPlugin extends Plugin {
        @Override
        public String name() {
            return "sticky-script";
        }

        @Override
        public String description() {
            return "installs a script that \"sticks\" when it runs for testing reindex";
        }

        public void onModule(ScriptModule module) {
            module.registerScript("sticky", StickyScriptFactory.class);
        }
    }

    public static class StickyScriptFactory implements NativeScriptFactory {
        @Override
        public ExecutableScript newScript(Map<String, Object> params) {
            return new ExecutableScript() {
                private Map<String, Object> source;

                @Override
                @SuppressWarnings("unchecked") // Safe because ctx always has this shape
                public void setNextVar(String name, Object value) {
                    if ("ctx".equals(name)) {
                        Map<String, Object> ctx = (Map<String, Object>) value;
                        source = (Map<String, Object>) ctx.get("_source");
                    } else {
                        throw new IllegalArgumentException("Unexpected var: " + name);
                    }
                }

                @Override
                public Object run() {
                    try {
                        // Tell the test we've started a document.
                        barrier.await(30, TimeUnit.SECONDS);

                        // Wait for the test to tell us to proceed.
                        barrier.await(30, TimeUnit.SECONDS);

                        // Make some change to the source so that update-by-query tests can make sure only one document was changed.
                        source.put("giraffes", "giraffes");
                        return null;
                    } catch (InterruptedException | BrokenBarrierException | TimeoutException e) {
                        throw new RuntimeException(e);
                    }
                }
            };
        }

        @Override
        public boolean needsScores() {
            return false;
        }
    }
}
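The CyclicBarrier(2) choreography above is easy to misread: every await() is a rendezvous between exactly two parties, the test thread and the script thread, so each pair of awaits forms one "script reports in / test releases it" handshake. A stripped-down, standalone illustration of the same handshake (demo code, not plugin code):

    import java.util.concurrent.CyclicBarrier;
    import java.util.concurrent.TimeUnit;

    public class BarrierHandshakeDemo {
        private static final CyclicBarrier barrier = new CyclicBarrier(2);

        public static void main(String[] args) throws Exception {
            Thread script = new Thread(() -> {
                try {
                    barrier.await(30, TimeUnit.SECONDS); // "I've started a document"
                    barrier.await(30, TimeUnit.SECONDS); // wait for permission to proceed
                    System.out.println("document processed");
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            });
            script.start();

            barrier.await(30, TimeUnit.SECONDS); // matches "I've started a document"
            // ... the test would cancel the task here, while the script is parked ...
            barrier.await(30, TimeUnit.SECONDS); // release the script
            script.join();
        }
    }

Because the barrier has exactly two parties, the test always knows precisely which document the script is parked on when it issues the cancel.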
@@ -0,0 +1,123 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
import org.elasticsearch.action.index.IndexRequestBuilder;

import java.util.ArrayList;
import java.util.List;

import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;

public class ReindexBasicTests extends ReindexTestCase {
    public void testFiltering() throws Exception {
        indexRandom(true, client().prepareIndex("source", "test", "1").setSource("foo", "a"),
                client().prepareIndex("source", "test", "2").setSource("foo", "a"),
                client().prepareIndex("source", "test", "3").setSource("foo", "b"),
                client().prepareIndex("source", "test", "4").setSource("foo", "c"));
        assertHitCount(client().prepareSearch("source").setSize(0).get(), 4);

        // Copy all the docs
        ReindexRequestBuilder copy = reindex().source("source").destination("dest", "all").refresh(true);
        assertThat(copy.get(), responseMatcher().created(4));
        assertHitCount(client().prepareSearch("dest").setTypes("all").setSize(0).get(), 4);

        // Now none of them
        copy = reindex().source("source").destination("dest", "none").filter(termQuery("foo", "no_match")).refresh(true);
        assertThat(copy.get(), responseMatcher().created(0));
        assertHitCount(client().prepareSearch("dest").setTypes("none").setSize(0).get(), 0);

        // Now half of them
        copy = reindex().source("source").destination("dest", "half").filter(termQuery("foo", "a")).refresh(true);
        assertThat(copy.get(), responseMatcher().created(2));
        assertHitCount(client().prepareSearch("dest").setTypes("half").setSize(0).get(), 2);

        // Limit with size
        copy = reindex().source("source").destination("dest", "size_one").size(1).refresh(true);
        assertThat(copy.get(), responseMatcher().created(1));
        assertHitCount(client().prepareSearch("dest").setTypes("size_one").setSize(0).get(), 1);
    }

    public void testCopyMany() throws Exception {
        List<IndexRequestBuilder> docs = new ArrayList<>();
        int max = between(150, 500);
        for (int i = 0; i < max; i++) {
            docs.add(client().prepareIndex("source", "test", Integer.toString(i)).setSource("foo", "a"));
        }

        indexRandom(true, docs);
        assertHitCount(client().prepareSearch("source").setSize(0).get(), max);

        // Copy all the docs
        ReindexRequestBuilder copy = reindex().source("source").destination("dest", "all").refresh(true);
        // Use a small batch size so we have to use more than one batch
        copy.source().setSize(5);
        assertThat(copy.get(), responseMatcher().created(max).batches(max, 5));
        assertHitCount(client().prepareSearch("dest").setTypes("all").setSize(0).get(), max);

        // Copy some of the docs
        int half = max / 2;
        copy = reindex().source("source").destination("dest", "half").refresh(true);
        // Use a small batch size so we have to use more than one batch
        copy.source().setSize(5);
        copy.size(half); // The real "size" of the request.
        assertThat(copy.get(), responseMatcher().created(half).batches(half, 5));
        assertHitCount(client().prepareSearch("dest").setTypes("half").setSize(0).get(), half);
    }

    public void testRefreshIsFalseByDefault() throws Exception {
        refreshTestCase(null, false);
    }

    public void testRefreshFalseDoesntMakeVisible() throws Exception {
        refreshTestCase(false, false);
    }

    public void testRefreshTrueMakesVisible() throws Exception {
        refreshTestCase(true, true);
    }

    /**
     * Executes a reindex into an index with -1 refresh_interval and checks that
     * the documents are only visible when they should be.
     */
    private void refreshTestCase(Boolean refresh, boolean visible) throws Exception {
        CreateIndexRequestBuilder create = client().admin().indices().prepareCreate("dest").setSettings("refresh_interval", -1);
        assertAcked(create);
        ensureYellow();
        indexRandom(true, client().prepareIndex("source", "test", "1").setSource("foo", "a"),
                client().prepareIndex("source", "test", "2").setSource("foo", "a"),
                client().prepareIndex("source", "test", "3").setSource("foo", "b"),
                client().prepareIndex("source", "test", "4").setSource("foo", "c"));
        assertHitCount(client().prepareSearch("source").setSize(0).get(), 4);

        // Copy all the docs
        ReindexRequestBuilder copy = reindex().source("source").destination("dest", "all");
        if (refresh != null) {
            copy.refresh(refresh);
        }
        assertThat(copy.get(), responseMatcher().created(4));

        assertHitCount(client().prepareSearch("dest").setTypes("all").setSize(0).get(), visible ? 4 : 0);
    }
}
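For readers checking the arithmetic in testCopyMany: batches(max, 5) presumably asserts a ceiling division, since the last scroll batch may come up short. As a one-liner (assumption about the matcher's semantics, which isn't shown in this hunk):

    // Expected scroll batches for `docs` documents at `batchSize` per scroll (integer ceiling).
    static int expectedBatches(int docs, int batchSize) {
        return (docs + batchSize - 1) / batchSize;
    }
    // expectedBatches(150, 5) == 30; expectedBatches(152, 5) == 31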
@@ -0,0 +1,52 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.plugins.Plugin;

import java.util.Collection;

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.equalTo;

/**
 * Tests that you can actually cancel a reindex request and that all the plumbing works. Doesn't test all of the different cancellation
 * places - that is the responsibility of {@link AsyncBulkByScrollActionTests}, which has more precise control to simulate failures but
 * does not exercise important portions of the stack like transport and task management.
 */
public class ReindexCancelTests extends ReindexTestCase {
    public void testCancel() throws Exception {
        ReindexResponse response = CancelTestUtils.testCancel(this, reindex().destination("dest", "test"), ReindexAction.NAME);

        assertThat(response, responseMatcher().created(1).reasonCancelled(equalTo("by user request")));
        refresh("dest");
        assertHitCount(client().prepareSearch("dest").setSize(0).get(), 1);
    }

    @Override
    protected int numberOfShards() {
        return 1;
    }

    @Override
    protected Collection<Class<? extends Plugin>> nodePlugins() {
        return CancelTestUtils.nodePlugins();
    }
}
@@ -0,0 +1,147 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
import org.elasticsearch.action.index.IndexRequestBuilder;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

import static org.elasticsearch.action.index.IndexRequest.OpType.CREATE;
import static org.hamcrest.Matchers.both;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.either;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.lessThanOrEqualTo;

/**
 * Tests failure capturing and abort-on-failure behavior of reindex.
 */
public class ReindexFailureTests extends ReindexTestCase {
    public void testFailuresCauseAbortDefault() throws Exception {
        /*
         * Create the destination index such that the copy will cause a mapping
         * conflict on every request.
         */
        indexRandom(true,
                client().prepareIndex("dest", "test", "test").setSource("test", 10) /* It's a string in the source! */);

        indexDocs(100);

        ReindexRequestBuilder copy = reindex().source("source").destination("dest");
        /*
         * Set the search size to something very small to cause there to be
         * multiple batches for this request so we can assert that we abort on
         * the first batch.
         */
        copy.source().setSize(1);

        ReindexResponse response = copy.get();
        assertThat(response, responseMatcher()
                .batches(1)
                .failures(both(greaterThan(0)).and(lessThanOrEqualTo(maximumNumberOfShards()))));
        for (Failure failure: response.getIndexingFailures()) {
            assertThat(failure.getMessage(), containsString("NumberFormatException[For input string: \"words words\"]"));
        }
    }

    public void testAbortOnVersionConflict() throws Exception {
        // Just put something in the way of the copy.
        indexRandom(true,
                client().prepareIndex("dest", "test", "1").setSource("test", "test"));

        indexDocs(100);

        ReindexRequestBuilder copy = reindex().source("source").destination("dest").abortOnVersionConflict(true);
        // CREATE will cause the conflict to prevent the write.
        copy.destination().setOpType(CREATE);

        ReindexResponse response = copy.get();
        assertThat(response, responseMatcher().batches(1).versionConflicts(1).failures(1).created(99));
        for (Failure failure: response.getIndexingFailures()) {
            assertThat(failure.getMessage(), containsString("VersionConflictEngineException[[test]["));
        }
    }

    /**
     * Make sure that search failures get pushed back to the user as failures of
     * the whole process. We do lose some information about how far along the
     * process got, but it's important that the user sees these failures.
     */
    public void testResponseOnSearchFailure() throws Exception {
        /*
         * Attempt to trigger a reindex failure by deleting the source index out
         * from under it.
         */
        int attempt = 1;
        while (attempt < 5) {
            indexDocs(100);
            ReindexRequestBuilder copy = reindex().source("source").destination("dest");
            copy.source().setSize(10);
            Future<ReindexResponse> response = copy.execute();
            client().admin().indices().prepareDelete("source").get();

            try {
                response.get();
                logger.info("Didn't trigger a reindex failure on the {} attempt", attempt);
                attempt++;
            } catch (ExecutionException e) {
                logger.info("Triggered a reindex failure on the {} attempt", attempt);
                assertThat(e.getMessage(), either(containsString("all shards failed")).or(containsString("No search context found")));
                return;
            }
        }
        assumeFalse("Wasn't able to trigger a reindex failure in " + attempt + " attempts.", true);
    }

    public void testSettingTtlIsValidationFailure() throws Exception {
        indexDocs(1);
        ReindexRequestBuilder copy = reindex().source("source").destination("dest");
        copy.destination().setTTL(123);
        try {
            copy.get();
            fail("Expected failure");
        } catch (ActionRequestValidationException e) {
            assertThat(e.getMessage(), containsString("setting ttl on destination isn't supported. use scripts instead."));
        }
    }

    public void testSettingTimestampIsValidationFailure() throws Exception {
        indexDocs(1);
        ReindexRequestBuilder copy = reindex().source("source").destination("dest");
        copy.destination().setTimestamp("now");
        try {
            copy.get();
            fail("Expected failure");
        } catch (ActionRequestValidationException e) {
            assertThat(e.getMessage(), containsString("setting timestamp on destination isn't supported. use scripts instead."));
        }
    }

    private void indexDocs(int count) throws Exception {
        List<IndexRequestBuilder> docs = new ArrayList<IndexRequestBuilder>(count);
        for (int i = 0; i < count; i++) {
            docs.add(client().prepareIndex("source", "test", Integer.toString(i)).setSource("test", "words words"));
        }
        indexRandom(true, docs);
    }
}
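The two validation tests above now fail explicitly when no exception is thrown. The same assertion can also be written more compactly with the expectThrows helper already used in BulkByScrollTaskTests, assuming the ESTestCase/LuceneTestCase variant that returns the caught exception; a sketch of one test rewritten that way, not the committed code:

    // Sketch: try/catch/fail collapsed into expectThrows.
    public void testSettingTtlIsValidationFailure() throws Exception {
        indexDocs(1);
        ReindexRequestBuilder copy = reindex().source("source").destination("dest");
        copy.destination().setTTL(123);
        ActionRequestValidationException e =
                expectThrows(ActionRequestValidationException.class, () -> copy.get());
        assertThat(e.getMessage(), containsString("setting ttl on destination isn't supported. use scripts instead."));
    }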
@@ -0,0 +1,77 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.index.mapper.internal.RoutingFieldMapper;

/**
 * Index-by-search test for ttl, timestamp, and routing.
 */
public class ReindexMetadataTests extends AbstractAsyncBulkIndexbyScrollActionMetadataTestCase<ReindexRequest, ReindexResponse> {
    public void testRoutingCopiedByDefault() throws Exception {
        IndexRequest index = new IndexRequest();
        action().copyMetadata(index, doc(RoutingFieldMapper.NAME, "foo"));
        assertEquals("foo", index.routing());
    }

    public void testRoutingCopiedIfRequested() throws Exception {
        TransportReindexAction.AsyncIndexBySearchAction action = action();
        action.mainRequest.getDestination().routing("keep");
        IndexRequest index = new IndexRequest();
        action.copyMetadata(index, doc(RoutingFieldMapper.NAME, "foo"));
        assertEquals("foo", index.routing());
    }

    public void testRoutingDiscardedIfRequested() throws Exception {
        TransportReindexAction.AsyncIndexBySearchAction action = action();
        action.mainRequest.getDestination().routing("discard");
        IndexRequest index = new IndexRequest();
        action.copyMetadata(index, doc(RoutingFieldMapper.NAME, "foo"));
        assertEquals(null, index.routing());
    }

    public void testRoutingSetIfRequested() throws Exception {
        TransportReindexAction.AsyncIndexBySearchAction action = action();
        action.mainRequest.getDestination().routing("=cat");
        IndexRequest index = new IndexRequest();
        action.copyMetadata(index, doc(RoutingFieldMapper.NAME, "foo"));
        assertEquals("cat", index.routing());
    }

    public void testRoutingSetIfWithDegenerateValue() throws Exception {
        TransportReindexAction.AsyncIndexBySearchAction action = action();
        action.mainRequest.getDestination().routing("==]");
        IndexRequest index = new IndexRequest();
        action.copyMetadata(index, doc(RoutingFieldMapper.NAME, "foo"));
        assertEquals("=]", index.routing());
    }

    @Override
    protected TransportReindexAction.AsyncIndexBySearchAction action() {
        return new TransportReindexAction.AsyncIndexBySearchAction(task, logger, null, null, threadPool, request(), listener());
    }

    @Override
    protected ReindexRequest request() {
        return new ReindexRequest(new SearchRequest(), new IndexRequest());
    }
}
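The five routing tests pin down a small grammar for destination().routing(...): "keep" copies the routing from the source hit, "discard" drops it, and a leading "=" uses the remainder of the string verbatim (so "==]" yields routing "=]"). A plain-Java sketch of that dispatch, with a hypothetical method name and behavior inferred only from the assertions above:

    // Sketch: resolve the routing to apply to the destination document.
    // `spec` is the configured routing directive, `sourceRouting` the hit's routing.
    static String resolveRouting(String spec, String sourceRouting) {
        if (spec == null || "keep".equals(spec)) {
            return sourceRouting;        // default: copy the routing from the source hit
        }
        if ("discard".equals(spec)) {
            return null;                 // drop the routing entirely
        }
        if (spec.startsWith("=")) {
            return spec.substring(1);    // "=cat" -> "cat", "==]" -> "=]"
        }
        throw new IllegalArgumentException("Unsupported routing directive: " + spec);
    }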
@@ -0,0 +1,112 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
import org.elasticsearch.index.query.QueryBuilder;

import static org.elasticsearch.index.query.QueryBuilders.hasParentQuery;
import static org.elasticsearch.index.query.QueryBuilders.idsQuery;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;
import static org.hamcrest.Matchers.equalTo;

/**
 * Index-by-search tests for parent/child.
 */
public class ReindexParentChildTests extends ReindexTestCase {
    QueryBuilder<?> findsCountry;
    QueryBuilder<?> findsCity;
    QueryBuilder<?> findsNeighborhood;

    public void testParentChild() throws Exception {
        createParentChildIndex("source");
        createParentChildIndex("dest");
        createParentChildDocs("source");

        // Copy parent to the new index
        ReindexRequestBuilder copy = reindex().source("source").destination("dest").filter(findsCountry).refresh(true);
        assertThat(copy.get(), responseMatcher().created(1));

        // Copy the child to a new index
        copy = reindex().source("source").destination("dest").filter(findsCity).refresh(true);
        assertThat(copy.get(), responseMatcher().created(1));

        // Make sure parent/child is intact on that index
        assertSearchHits(client().prepareSearch("dest").setQuery(findsCity).get(), "pittsburgh");

        // Copy the grandchild to a new index
        copy = reindex().source("source").destination("dest").filter(findsNeighborhood).refresh(true);
        assertThat(copy.get(), responseMatcher().created(1));

        // Make sure parent/child is intact on that index
        assertSearchHits(client().prepareSearch("dest").setQuery(findsNeighborhood).get(),
                "make-believe");

        // Copy the parent/child/grandchild structure all at once to a third index
        createParentChildIndex("dest_all_at_once");
        copy = reindex().source("source").destination("dest_all_at_once").refresh(true);
        assertThat(copy.get(), responseMatcher().created(3));

        // Make sure parent/child/grandchild is intact there too
        assertSearchHits(client().prepareSearch("dest_all_at_once").setQuery(findsNeighborhood).get(),
                "make-believe");
    }

    public void testErrorMessageWhenBadParentChild() throws Exception {
        createParentChildIndex("source");
        createParentChildDocs("source");

        ReindexRequestBuilder copy = reindex().source("source").destination("dest").filter(findsCity);
        try {
            copy.get();
            fail("Expected exception");
        } catch (IllegalArgumentException e) {
            assertThat(e.getMessage(), equalTo("Can't specify parent if no parent field has been configured"));
        }
    }

    /**
     * Sets up the parent/child/grandchild mappings on an index.
     */
    private void createParentChildIndex(String indexName) throws Exception {
        CreateIndexRequestBuilder create = client().admin().indices().prepareCreate(indexName);
        create.addMapping("city", "{\"_parent\": {\"type\": \"country\"}}");
        create.addMapping("neighborhood", "{\"_parent\": {\"type\": \"city\"}}");
        assertAcked(create);
        ensureGreen();
    }

    private void createParentChildDocs(String indexName) throws Exception {
        indexRandom(true, client().prepareIndex(indexName, "country", "united states").setSource("foo", "bar"),
                client().prepareIndex(indexName, "city", "pittsburgh").setParent("united states").setSource("foo", "bar"),
                client().prepareIndex(indexName, "neighborhood", "make-believe").setParent("pittsburgh")
                        .setSource("foo", "bar").setRouting("united states"));

        findsCountry = idsQuery("country").addIds("united states");
        findsCity = hasParentQuery("country", findsCountry);
        findsNeighborhood = hasParentQuery("city", findsCity);

        // Make sure we built the parent/child relationship
        assertSearchHits(client().prepareSearch(indexName).setQuery(findsCity).get(), "pittsburgh");
        assertSearchHits(client().prepareSearch(indexName).setQuery(findsNeighborhood).get(), "make-believe");
    }
}
@@ -0,0 +1,40 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import com.carrotsearch.randomizedtesting.annotations.Name;
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;

import org.elasticsearch.test.rest.ESRestTestCase;
import org.elasticsearch.test.rest.RestTestCandidate;
import org.elasticsearch.test.rest.parser.RestTestParseException;

import java.io.IOException;

public class ReindexRestIT extends ESRestTestCase {
    public ReindexRestIT(@Name("yaml") RestTestCandidate testCandidate) {
        super(testCandidate);
    }

    @ParametersFactory
    public static Iterable<Object[]> parameters() throws IOException, RestTestParseException {
        return ESRestTestCase.createParameters(0, 1);
    }
}
@@ -0,0 +1,111 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.support.AutoCreateIndex;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.AliasMetaData;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESTestCase;

import static org.hamcrest.Matchers.containsString;

/**
 * Tests that indexing from an index back into itself fails the request.
 */
public class ReindexSameIndexTests extends ESTestCase {
    private static final ClusterState STATE = ClusterState.builder(new ClusterName("test")).metaData(MetaData.builder()
            .put(index("target", "target_alias", "target_multi"), true)
            .put(index("target2", "target_multi"), true)
            .put(index("foo"), true)
            .put(index("bar"), true)
            .put(index("baz"), true)
            .put(index("source", "source_multi"), true)
            .put(index("source2", "source_multi"), true)).build();
    private static final IndexNameExpressionResolver INDEX_NAME_EXPRESSION_RESOLVER = new IndexNameExpressionResolver(Settings.EMPTY);
    private static final AutoCreateIndex AUTO_CREATE_INDEX = new AutoCreateIndex(Settings.EMPTY, INDEX_NAME_EXPRESSION_RESOLVER);

    public void testObviousCases() throws Exception {
        fails("target", "target");
        fails("target", "foo", "bar", "target", "baz");
        fails("target", "foo", "bar", "target", "baz", "target");
        succeeds("target", "source");
        succeeds("target", "source", "source2");
    }

    public void testAliasesContainTarget() throws Exception {
        fails("target", "target_alias");
        fails("target_alias", "target");
        fails("target", "foo", "bar", "target_alias", "baz");
        fails("target_alias", "foo", "bar", "target_alias", "baz");
        fails("target_alias", "foo", "bar", "target", "baz");
        fails("target", "foo", "bar", "target_alias", "target_alias");
        fails("target", "target_multi");
        fails("target", "foo", "bar", "target_multi", "baz");
        succeeds("target", "source_multi");
        succeeds("target", "source", "source2", "source_multi");
    }

    public void testTargetIsAlias() throws Exception {
        try {
            succeeds("target_multi", "foo");
            fail("Expected failure");
        } catch (IllegalArgumentException e) {
            assertThat(e.getMessage(), containsString("Alias [target_multi] has more than one indices associated with it [["));
            // The index names can come in either order
            assertThat(e.getMessage(), containsString("target"));
            assertThat(e.getMessage(), containsString("target2"));
        }
    }

    private void fails(String target, String... sources) throws Exception {
        try {
            succeeds(target, sources);
            fail("Expected an exception");
        } catch (ActionRequestValidationException e) {
            assertThat(e.getMessage(),
                    containsString("reindex cannot write into an index its reading from [target]"));
        }
    }

    private void succeeds(String target, String... sources) throws Exception {
        TransportReindexAction.validateAgainstAliases(new SearchRequest(sources), new IndexRequest(target), INDEX_NAME_EXPRESSION_RESOLVER,
                AUTO_CREATE_INDEX, STATE);
    }

    private static IndexMetaData index(String name, String... aliases) {
        IndexMetaData.Builder builder = IndexMetaData.builder(name).settings(Settings.builder()
                .put("index.version.created", Version.CURRENT.id)
                .put("index.number_of_shards", 1)
                .put("index.number_of_replicas", 1));
        for (String alias: aliases) {
            builder.putAlias(AliasMetaData.builder(alias).build());
        }
        return builder.build();
    }
}
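To make the alias cases concrete: the rule under test is "resolve the destination to exactly one concrete index, resolve every source expression (aliases included), and reject the request if the destination appears among the sources." A standalone sketch of that check over a plain name-to-indices map; the shape is hypothetical, and the real code distinguishes two exception types, as the expected exceptions above show:

    import java.util.Map;
    import java.util.Set;

    // Sketch: `resolved` maps every index name or alias to its concrete indices.
    static void validateTargetNotAmongSources(String target, Set<String> sources,
            Map<String, Set<String>> resolved) {
        Set<String> targetIndices = resolved.get(target);
        if (targetIndices.size() != 1) {
            // Mirrors the "more than one indices" rejection in testTargetIsAlias.
            throw new IllegalArgumentException(
                    "Alias [" + target + "] has more than one indices associated with it " + targetIndices);
        }
        String concreteTarget = targetIndices.iterator().next();
        for (String source : sources) {
            if (resolved.get(source).contains(concreteTarget)) {
                // Mirrors the rejection asserted in fails(...).
                throw new IllegalArgumentException(
                        "reindex cannot write into an index its reading from [" + concreteTarget + "]");
            }
        }
    }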
@@ -0,0 +1,139 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.lucene.uid.Versions;

import java.util.Map;

import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
import static org.hamcrest.Matchers.containsString;

/**
 * Tests index-by-search with a script modifying the documents.
 */
public class ReindexScriptTests extends AbstractAsyncBulkIndexByScrollActionScriptTestCase<ReindexRequest, ReindexResponse> {
    public void testSetIndex() throws Exception {
        Object dest = randomFrom(new Object[] {234, 234L, "pancake"});
        IndexRequest index = applyScript((Map<String, Object> ctx) -> ctx.put("_index", dest));
        assertEquals(dest.toString(), index.index());
    }

    public void testSettingIndexToNullIsError() throws Exception {
        try {
            applyScript((Map<String, Object> ctx) -> ctx.put("_index", null));
            fail("Expected failure");
        } catch (NullPointerException e) {
            assertThat(e.getMessage(), containsString("Can't reindex without a destination index!"));
        }
    }

    public void testSetType() throws Exception {
        Object type = randomFrom(new Object[] {234, 234L, "pancake"});
        IndexRequest index = applyScript((Map<String, Object> ctx) -> ctx.put("_type", type));
        assertEquals(type.toString(), index.type());
    }

    public void testSettingTypeToNullIsError() throws Exception {
        try {
            applyScript((Map<String, Object> ctx) -> ctx.put("_type", null));
            fail("Expected failure");
        } catch (NullPointerException e) {
            assertThat(e.getMessage(), containsString("Can't reindex without a destination type!"));
        }
    }

    public void testSetId() throws Exception {
        Object id = randomFrom(new Object[] {null, 234, 234L, "pancake"});
        IndexRequest index = applyScript((Map<String, Object> ctx) -> ctx.put("_id", id));
        if (id == null) {
            assertNull(index.id());
        } else {
            assertEquals(id.toString(), index.id());
        }
    }

    public void testSetVersion() throws Exception {
        Number version = randomFrom(new Number[] {null, 234, 234L});
        IndexRequest index = applyScript((Map<String, Object> ctx) -> ctx.put("_version", version));
        if (version == null) {
            assertEquals(Versions.MATCH_ANY, index.version());
        } else {
            assertEquals(version.longValue(), index.version());
        }
    }

    public void testSettingVersionToJunkIsAnError() throws Exception {
        Object junkVersion = randomFrom(new Object[] { "junk", Math.PI });
        try {
            applyScript((Map<String, Object> ctx) -> ctx.put("_version", junkVersion));
            fail("Expected failure");
        } catch (IllegalArgumentException e) {
            assertThat(e.getMessage(), containsString("_version may only be set to an int or a long but was ["));
            assertThat(e.getMessage(), containsString(junkVersion.toString()));
        }
    }

    public void testSetParent() throws Exception {
        String parent = randomRealisticUnicodeOfLengthBetween(5, 20);
        IndexRequest index = applyScript((Map<String, Object> ctx) -> ctx.put("_parent", parent));
        assertEquals(parent, index.parent());
    }

    public void testSetRouting() throws Exception {
        String routing = randomRealisticUnicodeOfLengthBetween(5, 20);
        IndexRequest index = applyScript((Map<String, Object> ctx) -> ctx.put("_routing", routing));
        assertEquals(routing, index.routing());
    }

    public void testSetTimestamp() throws Exception {
        String timestamp = randomFrom(null, "now", "1234");
        IndexRequest index = applyScript((Map<String, Object> ctx) -> ctx.put("_timestamp", timestamp));
        assertEquals(timestamp, index.timestamp());
    }

    public void testSetTtl() throws Exception {
        Number ttl = randomFrom(new Number[] { null, 1233214, 134143797143L });
        IndexRequest index = applyScript((Map<String, Object> ctx) -> ctx.put("_ttl", ttl));
        if (ttl == null) {
            assertEquals(null, index.ttl());
        } else {
            assertEquals(timeValueMillis(ttl.longValue()), index.ttl());
        }
    }

    public void testSettingTtlToJunkIsAnError() throws Exception {
        Object junkTtl = randomFrom(new Object[] { "junk", Math.PI });
        try {
            applyScript((Map<String, Object> ctx) -> ctx.put("_ttl", junkTtl));
            fail("Expected failure");
        } catch (IllegalArgumentException e) {
            assertThat(e.getMessage(), containsString("_ttl may only be set to an int or a long but was ["));
            assertThat(e.getMessage(), containsString(junkTtl.toString()));
        }
    }

    @Override
    protected ReindexRequest request() {
        return new ReindexRequest();
    }

    @Override
    protected AbstractAsyncBulkIndexByScrollAction<ReindexRequest, ReindexResponse> action() {
        return new TransportReindexAction.AsyncIndexBySearchAction(task, logger, null, null, threadPool, request(), listener());
    }
}
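The common thread in these tests: the script mutates a ctx map, and the action copies sanctioned keys back onto the IndexRequest, rejecting junk types for _version and _ttl. A stripped-down sketch of the _version copy-back step, with a hypothetical method name; the null-means-MATCH_ANY behavior is taken from testSetVersion above:

    import java.util.Map;
    import org.elasticsearch.common.lucene.uid.Versions;

    // Sketch: extract a script-written _version from ctx, enforcing int/long only.
    static long versionFromCtx(Map<String, Object> ctx) {
        Object version = ctx.get("_version");
        if (version == null) {
            return Versions.MATCH_ANY; // null means "no version requirement", per testSetVersion
        }
        if (version instanceof Integer || version instanceof Long) {
            return ((Number) version).longValue();
        }
        throw new IllegalArgumentException("_version may only be set to an int or a long but was [" + version + "]");
    }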
@@ -0,0 +1,77 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.hamcrest.Description;
import org.hamcrest.Matcher;

import java.util.Collection;

import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE;
import static org.hamcrest.Matchers.equalTo;

@ClusterScope(scope = SUITE, transportClientRatio = 0)
public abstract class ReindexTestCase extends ESIntegTestCase {
    @Override
    protected Collection<Class<? extends Plugin>> nodePlugins() {
        return pluginList(ReindexPlugin.class);
    }

    protected ReindexRequestBuilder reindex() {
        return ReindexAction.INSTANCE.newRequestBuilder(client());
    }

    public IndexBySearchResponseMatcher responseMatcher() {
        return new IndexBySearchResponseMatcher();
    }

    public static class IndexBySearchResponseMatcher
            extends AbstractBulkIndexByScrollResponseMatcher<ReindexResponse, IndexBySearchResponseMatcher> {
        private Matcher<Long> createdMatcher = equalTo(0L);

        public IndexBySearchResponseMatcher created(Matcher<Long> createdMatcher) {
            this.createdMatcher = createdMatcher;
            return this;
        }

        public IndexBySearchResponseMatcher created(long created) {
            return created(equalTo(created));
        }

        @Override
        protected boolean matchesSafely(ReindexResponse item) {
            return super.matchesSafely(item) && createdMatcher.matches(item.getCreated());
        }

        @Override
        public void describeTo(Description description) {
            super.describeTo(description);
            description.appendText(" and created matches ").appendDescriptionOf(createdMatcher);
        }

        @Override
        protected IndexBySearchResponseMatcher self() {
            return this;
        }
    }
}
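IndexBySearchResponseMatcher follows the self-typed fluent matcher pattern: the abstract base is generic over the concrete subclass, setters return self() so chained calls keep the subclass type, and each level contributes its own matchesSafely/describeTo pieces. A minimal standalone version of the pattern, built on plain Hamcrest with made-up class names:

    import org.hamcrest.Description;
    import org.hamcrest.Matcher;
    import org.hamcrest.TypeSafeMatcher;
    import static org.hamcrest.Matchers.equalTo;

    // Minimal self-typed fluent matcher, mirroring the structure above.
    abstract class FluentMatcher<T, Self extends FluentMatcher<T, Self>> extends TypeSafeMatcher<T> {
        abstract Self self();
    }

    class LengthMatcher extends FluentMatcher<String, LengthMatcher> {
        private Matcher<Integer> length = equalTo(0);

        LengthMatcher length(int expected) {
            this.length = equalTo(expected);
            return self(); // returning self() keeps the concrete type for chaining
        }

        @Override
        protected boolean matchesSafely(String item) {
            return length.matches(item.length());
        }

        @Override
        public void describeTo(Description description) {
            description.appendText("string with length ").appendDescriptionOf(length);
        }

        @Override
        LengthMatcher self() {
            return this;
        }
    }

The self() indirection is what lets subclass matchers like the created(...) methods above chain onto setters declared in the base class without casts.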
@@ -0,0 +1,143 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.get.GetResponse;

import static org.elasticsearch.action.index.IndexRequest.OpType.CREATE;
import static org.elasticsearch.index.VersionType.EXTERNAL;
import static org.elasticsearch.index.VersionType.INTERNAL;

public class ReindexVersioningTests extends ReindexTestCase {
    private static final int SOURCE_VERSION = 4;
    private static final int OLDER_VERSION = 1;
    private static final int NEWER_VERSION = 10;

    public void testExternalVersioningCreatesWhenAbsentAndSetsVersion() throws Exception {
        setupSourceAbsent();
        assertThat(reindexExternal(), responseMatcher().created(1));
        assertDest("source", SOURCE_VERSION);
    }

    public void testExternalVersioningUpdatesOnOlderAndSetsVersion() throws Exception {
        setupDestOlder();
        assertThat(reindexExternal(), responseMatcher().updated(1));
        assertDest("source", SOURCE_VERSION);
    }

    public void testExternalVersioningVersionConflictsOnNewer() throws Exception {
        setupDestNewer();
        assertThat(reindexExternal(), responseMatcher().versionConflicts(1));
        assertDest("dest", NEWER_VERSION);
    }

    public void testInternalVersioningCreatesWhenAbsent() throws Exception {
        setupSourceAbsent();
        assertThat(reindexInternal(), responseMatcher().created(1));
        assertDest("source", 1);
    }

    public void testInternalVersioningUpdatesOnOlder() throws Exception {
        setupDestOlder();
        assertThat(reindexInternal(), responseMatcher().updated(1));
        assertDest("source", OLDER_VERSION + 1);
    }

    public void testInternalVersioningUpdatesOnNewer() throws Exception {
        setupDestNewer();
        assertThat(reindexInternal(), responseMatcher().updated(1));
        assertDest("source", NEWER_VERSION + 1);
    }

    public void testCreateCreatesWhenAbsent() throws Exception {
        setupSourceAbsent();
        assertThat(reindexCreate(), responseMatcher().created(1));
        assertDest("source", 1);
    }

    public void testCreateVersionConflictsOnOlder() throws Exception {
        setupDestOlder();
        assertThat(reindexCreate(), responseMatcher().versionConflicts(1));
        assertDest("dest", OLDER_VERSION);
    }

    public void testCreateVersionConflictsOnNewer() throws Exception {
        setupDestNewer();
        assertThat(reindexCreate(), responseMatcher().versionConflicts(1));
        assertDest("dest", NEWER_VERSION);
    }

    /**
     * Perform a reindex with EXTERNAL versioning, which has "update if newer" semantics.
     */
    private ReindexResponse reindexExternal() {
        ReindexRequestBuilder reindex = reindex().source("source").destination("dest").abortOnVersionConflict(false);
        reindex.destination().setVersionType(EXTERNAL);
        return reindex.get();
    }

    /**
     * Perform a reindex with INTERNAL versioning, which has "overwrite" semantics.
     */
    private ReindexResponse reindexInternal() {
        ReindexRequestBuilder reindex = reindex().source("source").destination("dest").abortOnVersionConflict(false);
        reindex.destination().setVersionType(INTERNAL);
        return reindex.get();
    }

    /**
     * Perform a reindex with the CREATE OpType, which has "create" semantics.
     */
    private ReindexResponse reindexCreate() {
        ReindexRequestBuilder reindex = reindex().source("source").destination("dest").abortOnVersionConflict(false);
        reindex.destination().setOpType(CREATE);
        return reindex.get();
    }

    private void setupSourceAbsent() throws Exception {
        indexRandom(true, client().prepareIndex("source", "test", "test").setVersionType(EXTERNAL)
                .setVersion(SOURCE_VERSION).setSource("foo", "source"));

        assertEquals(SOURCE_VERSION, client().prepareGet("source", "test", "test").get().getVersion());
    }

    private void setupDest(int version) throws Exception {
        setupSourceAbsent();
        indexRandom(true, client().prepareIndex("dest", "test", "test").setVersionType(EXTERNAL)
                .setVersion(version).setSource("foo", "dest"));

        assertEquals(version, client().prepareGet("dest", "test", "test").get().getVersion());
    }

    private void setupDestOlder() throws Exception {
        setupDest(OLDER_VERSION);
    }

    private void setupDestNewer() throws Exception {
        setupDest(NEWER_VERSION);
    }

    private void assertDest(String fooValue, int version) {
        GetResponse get = client().prepareGet("dest", "test", "test").get();
        assertEquals(fooValue, get.getSource().get("foo"));
        assertEquals(version, get.getVersion());
    }
}
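The nine tests form a three-by-three matrix: one versioning mode against three destination states (absent, older than the source's version 4, newer than it). A compact sketch encoding the expected outcome for each cell, derived directly from the assertions above (hypothetical names, not plugin code):

    // Sketch: expected outcome per (mode, destination state) pair in the matrix above.
    // destVersion == null models "destination document absent".
    class VersioningMatrix {
        enum Outcome { CREATED, UPDATED, VERSION_CONFLICT }

        static Outcome expected(String mode, Long destVersion, long sourceVersion) {
            if (destVersion == null) {
                return Outcome.CREATED;                 // every mode creates when the dest is absent
            }
            switch (mode) {
                case "external": // update only when the incoming version is newer
                    return sourceVersion > destVersion ? Outcome.UPDATED : Outcome.VERSION_CONFLICT;
                case "internal": // overwrite regardless of the destination version
                    return Outcome.UPDATED;
                case "create":   // op_type=create never overwrites an existing doc
                    return Outcome.VERSION_CONFLICT;
                default:
                    throw new IllegalArgumentException("Unknown mode: " + mode);
            }
        }
    }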
@@ -0,0 +1,198 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.WriteConsistencyLevel;
import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.Index;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService.ScriptType;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;
import java.util.List;

import static java.util.Collections.emptyList;
import static java.util.Collections.emptyMap;
import static java.util.Collections.singletonList;
import static org.apache.lucene.util.TestUtil.randomSimpleString;
import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;

/**
 * Round trip tests for all Streamable things declared in this plugin.
 */
public class RoundTripTests extends ESTestCase {
    public void testReindexRequest() throws IOException {
        ReindexRequest reindex = new ReindexRequest(new SearchRequest(), new IndexRequest());
        randomRequest(reindex);
        reindex.getDestination().version(randomFrom(Versions.MATCH_ANY, Versions.MATCH_DELETED, 12L, 1L, 123124L, 12L));
        reindex.getDestination().index("test");
        ReindexRequest tripped = new ReindexRequest();
        roundTrip(reindex, tripped);
        assertRequestEquals(reindex, tripped);
        assertEquals(reindex.getDestination().version(), tripped.getDestination().version());
        assertEquals(reindex.getDestination().index(), tripped.getDestination().index());
    }

    public void testUpdateByQueryRequest() throws IOException {
        UpdateByQueryRequest update = new UpdateByQueryRequest(new SearchRequest());
        randomRequest(update);
        UpdateByQueryRequest tripped = new UpdateByQueryRequest();
        roundTrip(update, tripped);
        assertRequestEquals(update, tripped);
    }

    private void randomRequest(AbstractBulkIndexByScrollRequest<?> request) {
        request.getSearchRequest().indices("test");
        request.getSearchRequest().source().size(between(1, 1000));
        request.setSize(random().nextBoolean() ? between(1, Integer.MAX_VALUE) : -1);
        request.setAbortOnVersionConflict(random().nextBoolean());
        request.setRefresh(rarely());
        request.setTimeout(TimeValue.parseTimeValue(randomTimeValue(), null, "test"));
        request.setConsistency(randomFrom(WriteConsistencyLevel.values()));
        request.setScript(random().nextBoolean() ? null : randomScript());
    }

    private void assertRequestEquals(AbstractBulkIndexByScrollRequest<?> request,
            AbstractBulkIndexByScrollRequest<?> tripped) {
        assertArrayEquals(request.getSearchRequest().indices(), tripped.getSearchRequest().indices());
        assertEquals(request.getSearchRequest().source().size(), tripped.getSearchRequest().source().size());
        assertEquals(request.isAbortOnVersionConflict(), tripped.isAbortOnVersionConflict());
        assertEquals(request.isRefresh(), tripped.isRefresh());
        assertEquals(request.getTimeout(), tripped.getTimeout());
        assertEquals(request.getConsistency(), tripped.getConsistency());
        assertEquals(request.getScript(), tripped.getScript());
        assertEquals(request.getRetryBackoffInitialTime(), tripped.getRetryBackoffInitialTime());
        assertEquals(request.getMaxRetries(), tripped.getMaxRetries());
    }

    public void testBulkByTaskStatus() throws IOException {
        BulkByScrollTask.Status status = randomStatus();
        BytesStreamOutput out = new BytesStreamOutput();
        status.writeTo(out);
        BulkByScrollTask.Status tripped = new BulkByScrollTask.Status(out.bytes().streamInput());
        assertTaskStatusEquals(status, tripped);
    }

    public void testReindexResponse() throws IOException {
        ReindexResponse response = new ReindexResponse(timeValueMillis(randomPositiveLong()), randomStatus(), randomIndexingFailures(),
                randomSearchFailures());
        ReindexResponse tripped = new ReindexResponse();
        roundTrip(response, tripped);
        assertResponseEquals(response, tripped);
    }

    public void testBulkIndexByScrollResponse() throws IOException {
        BulkIndexByScrollResponse response = new BulkIndexByScrollResponse(timeValueMillis(randomPositiveLong()), randomStatus(),
                randomIndexingFailures(), randomSearchFailures());
        BulkIndexByScrollResponse tripped = new BulkIndexByScrollResponse();
        roundTrip(response, tripped);
        assertResponseEquals(response, tripped);
    }

    private BulkByScrollTask.Status randomStatus() {
        return new BulkByScrollTask.Status(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
                randomPositiveInt(), randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
                random().nextBoolean() ? null : randomSimpleString(random()));
    }

    private List<Failure> randomIndexingFailures() {
        return usually() ? emptyList()
                : singletonList(new Failure(randomSimpleString(random()), randomSimpleString(random()),
                        randomSimpleString(random()), new IllegalArgumentException("test")));
    }

    private List<ShardSearchFailure> randomSearchFailures() {
        if (usually()) {
            return emptyList();
        }
        Index index = new Index(randomSimpleString(random()), "uuid");
        return singletonList(new ShardSearchFailure(randomSimpleString(random()),
                new SearchShardTarget(randomSimpleString(random()), index, randomInt()), randomFrom(RestStatus.values())));
    }

    private void roundTrip(Streamable example, Streamable empty) throws IOException {
        BytesStreamOutput out = new BytesStreamOutput();
        example.writeTo(out);
        empty.readFrom(out.bytes().streamInput());
    }

    private Script randomScript() {
        return new Script(randomSimpleString(random()), // Name
|
||||
randomFrom(ScriptType.values()), // Type
|
||||
random().nextBoolean() ? null : randomSimpleString(random()), // Language
|
||||
emptyMap()); // Params
|
||||
}
|
||||
|
||||
private long randomPositiveLong() {
|
||||
long l;
|
||||
do {
|
||||
l = randomLong();
|
||||
} while (l < 0);
|
||||
return l;
|
||||
}
|
||||
|
||||
private int randomPositiveInt() {
|
||||
return randomInt(Integer.MAX_VALUE);
|
||||
}
|
||||
|
||||
private void assertResponseEquals(BulkIndexByScrollResponse expected, BulkIndexByScrollResponse actual) {
|
||||
assertEquals(expected.getTook(), actual.getTook());
|
||||
assertTaskStatusEquals(expected.getStatus(), actual.getStatus());
|
||||
assertEquals(expected.getIndexingFailures().size(), actual.getIndexingFailures().size());
|
||||
for (int i = 0; i < expected.getIndexingFailures().size(); i++) {
|
||||
Failure expectedFailure = expected.getIndexingFailures().get(i);
|
||||
Failure actualFailure = actual.getIndexingFailures().get(i);
|
||||
assertEquals(expectedFailure.getIndex(), actualFailure.getIndex());
|
||||
assertEquals(expectedFailure.getType(), actualFailure.getType());
|
||||
assertEquals(expectedFailure.getId(), actualFailure.getId());
|
||||
assertEquals(expectedFailure.getMessage(), actualFailure.getMessage());
|
||||
assertEquals(expectedFailure.getStatus(), actualFailure.getStatus());
|
||||
}
|
||||
assertEquals(expected.getSearchFailures().size(), actual.getSearchFailures().size());
|
||||
for (int i = 0; i < expected.getSearchFailures().size(); i++) {
|
||||
ShardSearchFailure expectedFailure = expected.getSearchFailures().get(i);
|
||||
ShardSearchFailure actualFailure = actual.getSearchFailures().get(i);
|
||||
assertEquals(expectedFailure.shard(), actualFailure.shard());
|
||||
assertEquals(expectedFailure.status(), actualFailure.status());
|
||||
// We can't use getCause because throwable doesn't implement equals
|
||||
assertEquals(expectedFailure.reason(), actualFailure.reason());
|
||||
}
|
||||
}
|
||||
|
||||
private void assertTaskStatusEquals(BulkByScrollTask.Status expected, BulkByScrollTask.Status actual) {
|
||||
assertEquals(expected.getUpdated(), actual.getUpdated());
|
||||
assertEquals(expected.getCreated(), actual.getCreated());
|
||||
assertEquals(expected.getDeleted(), actual.getDeleted());
|
||||
assertEquals(expected.getBatches(), actual.getBatches());
|
||||
assertEquals(expected.getVersionConflicts(), actual.getVersionConflicts());
|
||||
assertEquals(expected.getNoops(), actual.getNoops());
|
||||
assertEquals(expected.getRetries(), actual.getRetries());
|
||||
}
|
||||
}
|
|
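Note: the roundTrip helper above is the whole trick these tests rely on: serialize into an in-memory buffer, deserialize into an empty instance, and compare field by field. As a rough, self-contained sketch of the same idea using only java.io (the Point type and its methods are hypothetical, not part of this change):

import java.io.*;

class RoundTripSketch {
    // Hypothetical stand-in for a Streamable: writes and reads its own fields.
    static final class Point {
        final int x, y;
        Point(int x, int y) { this.x = x; this.y = y; }
        void writeTo(DataOutput out) throws IOException { out.writeInt(x); out.writeInt(y); }
        static Point readFrom(DataInput in) throws IOException { return new Point(in.readInt(), in.readInt()); }
    }

    public static void main(String[] args) throws IOException {
        Point original = new Point(3, 4);
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.writeTo(new DataOutputStream(bytes)); // serialize
        Point tripped = Point.readFrom(
                new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))); // deserialize
        if (original.x != tripped.x || original.y != tripped.y) {
            throw new AssertionError("round trip lost data");
        }
    }
}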
@@ -0,0 +1,55 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.script.ExecutableScript;

import java.util.Map;
import java.util.function.Consumer;

public class SimpleExecutableScript implements ExecutableScript {
    private final Consumer<Map<String, Object>> script;
    private Map<String, Object> ctx;

    public SimpleExecutableScript(Consumer<Map<String, Object>> script) {
        this.script = script;
    }

    @Override
    public Object run() {
        script.accept(ctx);
        return null;
    }

    @Override
    @SuppressWarnings("unchecked")
    public void setNextVar(String name, Object value) {
        if ("ctx".equals(name)) {
            ctx = (Map<String, Object>) value;
        } else {
            throw new IllegalArgumentException("Unsupported var [" + name + "]");
        }
    }

    @Override
    public Object unwrap(Object value) {
        return value;
    }
}
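Note: SimpleExecutableScript lets a test express a script body as a plain lambda over the ctx map. A hypothetical usage sketch (not part of the change), as it might appear inside a test method:

// Hypothetical: drive the fake script the way the script service would.
Map<String, Object> source = new HashMap<>();
source.put("user", "kimchy");
Map<String, Object> ctx = new HashMap<>();
ctx.put("_source", source);

SimpleExecutableScript script = new SimpleExecutableScript(c -> {
    @SuppressWarnings("unchecked")
    Map<String, Object> s = (Map<String, Object>) c.get("_source");
    s.put("user", "other" + s.get("user"));
});
script.setNextVar("ctx", ctx); // only "ctx" is supported; anything else throws
script.run();                  // mutates the map in place
assert "otherkimchy".equals(source.get("user"));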
@@ -0,0 +1,107 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
import org.elasticsearch.search.sort.SortOrder;

import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;

public class UpdateByQueryBasicTests extends UpdateByQueryTestCase {
    public void testBasics() throws Exception {
        indexRandom(true, client().prepareIndex("test", "test", "1").setSource("foo", "a"),
                client().prepareIndex("test", "test", "2").setSource("foo", "a"),
                client().prepareIndex("test", "test", "3").setSource("foo", "b"),
                client().prepareIndex("test", "test", "4").setSource("foo", "c"));
        assertHitCount(client().prepareSearch("test").setTypes("test").setSize(0).get(), 4);
        assertEquals(1, client().prepareGet("test", "test", "1").get().getVersion());
        assertEquals(1, client().prepareGet("test", "test", "4").get().getVersion());

        // Reindex all the docs
        assertThat(request().source("test").refresh(true).get(), responseMatcher().updated(4));
        assertEquals(2, client().prepareGet("test", "test", "1").get().getVersion());
        assertEquals(2, client().prepareGet("test", "test", "4").get().getVersion());

        // Now none of them
        assertThat(request().source("test").filter(termQuery("foo", "no_match")).refresh(true).get(), responseMatcher().updated(0));
        assertEquals(2, client().prepareGet("test", "test", "1").get().getVersion());
        assertEquals(2, client().prepareGet("test", "test", "4").get().getVersion());

        // Now half of them
        assertThat(request().source("test").filter(termQuery("foo", "a")).refresh(true).get(), responseMatcher().updated(2));
        assertEquals(3, client().prepareGet("test", "test", "1").get().getVersion());
        assertEquals(3, client().prepareGet("test", "test", "2").get().getVersion());
        assertEquals(2, client().prepareGet("test", "test", "3").get().getVersion());
        assertEquals(2, client().prepareGet("test", "test", "4").get().getVersion());

        // Limit with size
        UpdateByQueryRequestBuilder request = request().source("test").size(3).refresh(true);
        request.source().addSort("foo", SortOrder.ASC);
        assertThat(request.get(), responseMatcher().updated(3));
        // Only the first three documents are updated because of sort
        assertEquals(4, client().prepareGet("test", "test", "1").get().getVersion());
        assertEquals(4, client().prepareGet("test", "test", "2").get().getVersion());
        assertEquals(3, client().prepareGet("test", "test", "3").get().getVersion());
        assertEquals(2, client().prepareGet("test", "test", "4").get().getVersion());
    }

    public void testRefreshIsFalseByDefault() throws Exception {
        refreshTestCase(null, false);
    }

    public void testRefreshFalseDoesntMakeVisible() throws Exception {
        refreshTestCase(false, false);
    }

    public void testRefreshTrueMakesVisible() throws Exception {
        refreshTestCase(true, true);
    }

    /**
     * Executes an update_by_query on an index with -1 refresh_interval and
     * checks that the documents are visible properly.
     */
    private void refreshTestCase(Boolean refresh, boolean visible) throws Exception {
        CreateIndexRequestBuilder create = client().admin().indices().prepareCreate("test").setSettings("refresh_interval", -1);
        create.addMapping("test", "{\"dynamic\": \"false\"}");
        assertAcked(create);
        ensureYellow();
        indexRandom(true, client().prepareIndex("test", "test", "1").setSource("foo", "a"),
                client().prepareIndex("test", "test", "2").setSource("foo", "a"),
                client().prepareIndex("test", "test", "3").setSource("foo", "b"),
                client().prepareIndex("test", "test", "4").setSource("foo", "c"));
        assertHitCount(client().prepareSearch("test").setQuery(matchQuery("foo", "a")).setSize(0).get(), 0);

        // Now make foo searchable
        assertAcked(client().admin().indices().preparePutMapping("test").setType("test")
                .setSource("{\"test\": {\"properties\":{\"foo\": {\"type\": \"string\"}}}}"));
        UpdateByQueryRequestBuilder update = request().source("test");
        if (refresh != null) {
            update.refresh(refresh);
        }
        assertThat(update.get(), responseMatcher().updated(4));

        assertHitCount(client().prepareSearch("test").setQuery(matchQuery("foo", "a")).setSize(0).get(), visible ? 2 : 0);
    }
}
@@ -0,0 +1,53 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.plugins.Plugin;

import java.util.Collection;

import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.equalTo;

/**
 * Tests that you can actually cancel an update-by-query request and that all the plumbing works. Doesn't test all of the different
 * cancellation places - that is the responsibility of {@link AsyncBulkByScrollActionTests}, which has more precise control to simulate
 * failures but does not exercise important portions of the stack like transport and task management.
 */
public class UpdateByQueryCancelTests extends UpdateByQueryTestCase {
    public void testCancel() throws Exception {
        BulkIndexByScrollResponse response = CancelTestUtils.testCancel(this, request(), UpdateByQueryAction.NAME);

        assertThat(response, responseMatcher().updated(1).reasonCancelled(equalTo("by user request")));
        refresh("source");
        assertHitCount(client().prepareSearch("source").setSize(0).setQuery(matchQuery("giraffes", "giraffes")).get(), 1);
    }

    @Override
    protected int numberOfShards() {
        return 1;
    }

    @Override
    protected Collection<Class<? extends Plugin>> nodePlugins() {
        return CancelTestUtils.nodePlugins();
    }
}
@@ -0,0 +1,43 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.index.mapper.internal.RoutingFieldMapper;

public class UpdateByQueryMetadataTests
        extends AbstractAsyncBulkIndexbyScrollActionMetadataTestCase<UpdateByQueryRequest, BulkIndexByScrollResponse> {
    public void testRoutingIsCopied() throws Exception {
        IndexRequest index = new IndexRequest();
        action().copyMetadata(index, doc(RoutingFieldMapper.NAME, "foo"));
        assertEquals("foo", index.routing());
    }

    @Override
    protected TransportUpdateByQueryAction.AsyncIndexBySearchAction action() {
        return new TransportUpdateByQueryAction.AsyncIndexBySearchAction(task, logger, null, null, threadPool, request(), listener());
    }

    @Override
    protected UpdateByQueryRequest request() {
        return new UpdateByQueryRequest(new SearchRequest());
    }
}
@@ -0,0 +1,52 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;

import java.util.Collection;

import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE;

@ClusterScope(scope = SUITE, transportClientRatio = 0)
public abstract class UpdateByQueryTestCase extends ESIntegTestCase {
    @Override
    protected Collection<Class<? extends Plugin>> nodePlugins() {
        return pluginList(ReindexPlugin.class);
    }

    protected UpdateByQueryRequestBuilder request() {
        return UpdateByQueryAction.INSTANCE.newRequestBuilder(client());
    }

    public BulkIndexbyScrollResponseMatcher responseMatcher() {
        return new BulkIndexbyScrollResponseMatcher();
    }

    public static class BulkIndexbyScrollResponseMatcher extends
            AbstractBulkIndexByScrollResponseMatcher<BulkIndexByScrollResponse, BulkIndexbyScrollResponseMatcher> {
        @Override
        protected BulkIndexbyScrollResponseMatcher self() {
            return this;
        }
    }
}
@@ -0,0 +1,99 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.index.engine.VersionConflictEngineException;

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;

import static org.apache.lucene.util.TestUtil.randomSimpleString;
import static org.hamcrest.Matchers.either;
import static org.hamcrest.Matchers.equalTo;

/**
 * Mutates a document while update-by-query-ing it and asserts that the mutation
 * always sticks. Update-by-query should never revert documents.
 */
public class UpdateByQueryWhileModifyingTests extends UpdateByQueryTestCase {
    private static final int MAX_MUTATIONS = 50;
    private static final int MAX_ATTEMPTS = 10;

    public void testUpdateWhileReindexing() throws Exception {
        AtomicReference<String> value = new AtomicReference<>(randomSimpleString(random()));
        indexRandom(true, client().prepareIndex("test", "test", "test").setSource("test", value.get()));

        AtomicReference<Throwable> failure = new AtomicReference<>();
        AtomicBoolean keepUpdating = new AtomicBoolean(true);
        Thread updater = new Thread(() -> {
            while (keepUpdating.get()) {
                try {
                    assertThat(request().source("test").refresh(true).abortOnVersionConflict(false).get(), responseMatcher()
                            .updated(either(equalTo(0L)).or(equalTo(1L))).versionConflicts(either(equalTo(0L)).or(equalTo(1L))));
                } catch (Throwable t) {
                    failure.set(t);
                }
            }
        });
        updater.start();

        try {
            for (int i = 0; i < MAX_MUTATIONS; i++) {
                GetResponse get = client().prepareGet("test", "test", "test").get();
                assertEquals(value.get(), get.getSource().get("test"));
                value.set(randomSimpleString(random()));
                IndexRequestBuilder index = client().prepareIndex("test", "test", "test").setSource("test", value.get())
                        .setRefresh(true);
                /*
                 * Update by query increments the version number, so concurrent
                 * indexing can hit version conflict exceptions; we just
                 * blindly retry.
                 */
                int attempts = 0;
                while (true) {
                    attempts++;
                    try {
                        index.setVersion(get.getVersion()).get();
                        break;
                    } catch (VersionConflictEngineException e) {
                        if (attempts >= MAX_ATTEMPTS) {
                            throw new RuntimeException(
                                    "Failed to index after [" + MAX_ATTEMPTS + "] attempts. Too many version conflicts!");
                        }
                        logger.info(
                                "Caught expected version conflict trying to perform mutation number {} with version {}. Retrying.",
                                i, get.getVersion());
                        get = client().prepareGet("test", "test", "test").get();
                    }
                }
            }
        } finally {
            keepUpdating.set(false);
            updater.join(TimeUnit.SECONDS.toMillis(10));
            if (failure.get() != null) {
                throw new RuntimeException(failure.get());
            }
        }
    }
}
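Note: stripped of test scaffolding, the retry loop above is the standard optimistic-concurrency pattern: read the current version, write conditioned on it, and re-read on conflict. Roughly (same client calls as in the test; newValue stands in for the mutation):

int attempts = 0;
GetResponse get = client().prepareGet("test", "test", "test").get();
while (true) {
    attempts++;
    try {
        client().prepareIndex("test", "test", "test")
                .setSource("test", newValue)  // newValue: the mutation to apply
                .setVersion(get.getVersion()) // write only if nobody raced us
                .get();
        break;
    } catch (VersionConflictEngineException e) {
        if (attempts >= MAX_ATTEMPTS) {
            throw new RuntimeException("too many version conflicts");
        }
        get = client().prepareGet("test", "test", "test").get(); // re-read and retry
    }
}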
@@ -0,0 +1,56 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import java.util.Date;
import java.util.Map;

import static org.hamcrest.Matchers.containsString;

public class UpdateByQueryWithScriptTests
        extends AbstractAsyncBulkIndexByScrollActionScriptTestCase<UpdateByQueryRequest, BulkIndexByScrollResponse> {
    public void testModifyingCtxNotAllowed() {
        /*
         * It's important that none of these actually match any of the fields.
         * They don't now, but make sure they still don't match if you add any
         * more. The point of having many is that they should all present the
         * same error message to the user, not some ClassCastException.
         */
        Object[] options = new Object[] {"cat", new Object(), 123, new Date(), Math.PI};
        for (String ctxVar: new String[] {"_index", "_type", "_id", "_version", "_parent", "_routing", "_timestamp", "_ttl"}) {
            try {
                applyScript((Map<String, Object> ctx) -> ctx.put(ctxVar, randomFrom(options)));
            } catch (IllegalArgumentException e) {
                assertThat(e.getMessage(), containsString("Modifying [" + ctxVar + "] not allowed"));
            }
        }
    }

    @Override
    protected UpdateByQueryRequest request() {
        return new UpdateByQueryRequest();
    }

    @Override
    protected AbstractAsyncBulkIndexByScrollAction<UpdateByQueryRequest, BulkIndexByScrollResponse> action() {
        return new TransportUpdateByQueryAction.AsyncIndexBySearchAction(task, logger, null, null, threadPool,
                request(), listener());
    }
}
@@ -0,0 +1,363 @@
---
"Response format for created":
  - do:
      index:
        index: source
        type: foo
        id: 1
        body: { "text": "test" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        body:
          source:
            index: source
          dest:
            index: dest
  - match: {created: 1}
  - match: {updated: 0}
  - match: {version_conflicts: 0}
  - match: {batches: 1}
  - match: {failures: []}
  - is_true: took
  - is_false: task

---
"Response format for updated":
  - do:
      index:
        index: source
        type: foo
        id: 1
        body: { "text": "test" }
  - do:
      index:
        index: dest
        type: foo
        id: 1
        body: { "text": "test" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        body:
          source:
            index: source
          dest:
            index: dest
  - match: {created: 0}
  - match: {updated: 1}
  - match: {version_conflicts: 0}
  - match: {batches: 1}
  - match: {failures: []}
  - is_true: took
  - is_false: task

---
"wait_for_completion=false":
  - do:
      index:
        index: source
        type: foo
        id: 1
        body: { "text": "test" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        wait_for_completion: false
        body:
          source:
            index: source
          dest:
            index: dest
  - match: {task: '/.+:\d+/'}
  - is_false: updated
  - is_false: version_conflicts
  - is_false: batches
  - is_false: failures
  - is_false: noops
  - is_false: took
  - is_false: created

---
"Response format for version conflict":
  - do:
      index:
        index: source
        type: foo
        id: 1
        body: { "text": "test" }
  - do:
      index:
        index: dest
        type: foo
        id: 1
        body: { "text": "test" }
  - do:
      indices.refresh: {}

  - do:
      catch: conflict
      reindex:
        body:
          source:
            index: source
          dest:
            index: dest
            op_type: create
  - match: {created: 0}
  - match: {updated: 0}
  - match: {version_conflicts: 1}
  - match: {batches: 1}
  - match: {failures.0.index: dest}
  - match: {failures.0.type: foo}
  - match: {failures.0.id: "1"}
  - match: {failures.0.status: 409}
  - match: {failures.0.cause.type: version_conflict_engine_exception}
  - match: {failures.0.cause.reason: "[foo][1]: version conflict, document already exists (current version [1])"}
  - match: {failures.0.cause.shard: /\d+/}
  - match: {failures.0.cause.index: dest}
  - is_true: took

---
"Response format for version conflict with conflicts=proceed":
  - do:
      index:
        index: source
        type: foo
        id: 1
        body: { "text": "test" }
  - do:
      index:
        index: dest
        type: foo
        id: 1
        body: { "text": "test" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        body:
          conflicts: proceed
          source:
            index: source
          dest:
            index: dest
            op_type: create
  - match: {created: 0}
  - match: {updated: 0}
  - match: {version_conflicts: 1}
  - match: {batches: 1}
  - match: {failures: []}
  - is_true: took

---
"Simplest example in docs":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: twitter
          dest:
            index: new_twitter

  - do:
      search:
        index: new_twitter
  - match: { hits.total: 1 }

---
"Limit by type example in docs":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      index:
        index: twitter
        type: junk
        id: 1
        body: { "user": "kimchy" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: twitter
            type: tweet
          dest:
            index: new_twitter

  - do:
      search:
        index: new_twitter
  - match: { hits.total: 1 }

---
"Limit by query example in docs":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      index:
        index: twitter
        type: tweet
        id: 2
        body: { "user": "junk" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: twitter
            query:
              match:
                user: kimchy
          dest:
            index: new_twitter

  - do:
      search:
        index: new_twitter
  - match: { hits.total: 1 }

---
"Override type example in docs":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      index:
        index: twitter
        type: junk
        id: 1
        body: { "user": "kimchy" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: twitter
            type: tweet
          dest:
            index: new_twitter
            type: chirp

  - do:
      search:
        index: new_twitter
        type: chirp
  - match: { hits.total: 1 }

---
"Multi index, multi type example from docs":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      index:
        index: blog
        type: post
        id: 1
        body: { "user": "kimchy" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: [twitter, blog]
            type: [tweet, post]
          dest:
            index: all_together

  - do:
      search:
        index: all_together
        type: tweet
        body:
          query:
            match:
              user: kimchy
  - match: { hits.total: 1 }

  - do:
      search:
        index: all_together
        type: post
        body:
          query:
            match:
              user: kimchy
  - match: { hits.total: 1 }

---
"Limit by size example from docs":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      index:
        index: twitter
        type: tweet
        id: 2
        body: { "user": "kimchy" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          size: 1
          source:
            index: twitter
          dest:
            index: new_twitter

  - do:
      search:
        index: new_twitter
        type: tweet
  - match: { hits.total: 1 }
@@ -0,0 +1,150 @@
---
"no body fails":
  - do:
      catch: /body required/
      reindex: {}

---
"junk in body fails":
  - do:
      catch: /unknown field \[junk\]/
      reindex:
        body:
          junk:
            more_junk:

---
"junk in source fails":
  - do:
      catch: /Unknown key for a START_OBJECT in \[junk\]./
      reindex:
        body:
          source:
            junk: {}

---
"junk in dest fails":
  - do:
      catch: /unknown field \[junk\]/
      reindex:
        body:
          dest:
            junk: {}

---
"no index on destination fails":
  - do:
      catch: /index must be specified/
      reindex:
        body:
          dest: {}

---
"source size is accepted":
  - do:
      index:
        index: source
        type: foo
        id: 1
        body: { "text": "test" }
  - do:
      reindex:
        body:
          source:
            index: source
            size: 1000
          dest:
            index: dest

---
"search size fails if not a number":
  - do:
      catch: '/NumberFormatException: For input string: "cat"/'
      reindex:
        body:
          source:
            size: cat
          dest:
            index: dest

---
"search from is not supported":
  - do:
      catch: /from is not supported in this context/
      reindex:
        body:
          source:
            from: 1
          dest:
            index: dest

---
"overwriting version is not supported":
  - do:
      catch: /.*\[dest\] unknown field \[version\].*/
      reindex:
        body:
          dest:
            version: 10

---
"bad conflicts is error":
  - do:
      catch: /.*conflicts may only be "proceed" or "abort" but was \[cat\]/
      reindex:
        body:
          conflicts: cat

---
"invalid size fails":
  - do:
      index:
        index: test
        type: test
        id: 1
        body: { "text": "test" }
  - do:
      catch: /size should be greater than 0 if the request is limited to some number of documents or -1 if it isn't but it was \[-4\]/
      reindex:
        body:
          source:
            index: test
          dest:
            index: dest
          size: -4

---
"can't set ttl":
  - do:
      index:
        index: test
        type: test
        id: 1
        body: { "text": "test" }
  - do:
      catch: /setting ttl on destination isn't supported. use scripts instead./
      reindex:
        body:
          source:
            index: test
          dest:
            index: dest
            ttl: 3m

---
"can't set timestamp":
  - do:
      index:
        index: test
        type: test
        id: 1
        body: { "text": "test" }
  - do:
      catch: /setting timestamp on destination isn't supported. use scripts instead./
      reindex:
        body:
          source:
            index: test
          dest:
            index: dest
            timestamp: "123"
@@ -0,0 +1,72 @@
---
"Can limit copied docs by specifying a query":
  - do:
      index:
        index: test
        type: test
        id: 1
        body: { "text": "test" }
  - do:
      index:
        index: test
        type: test
        id: 2
        body: { "text": "junk" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: test
            query:
              match:
                text: test
          dest:
            index: target

  - do:
      search:
        index: target
  - match: { hits.total: 1 }

---
"Sorting and size combined":
  - do:
      index:
        index: test
        type: test
        id: 1
        body: { "order": 1 }
  - do:
      index:
        index: test
        type: test
        id: 2
        body: { "order": 2 }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          size: 1
          source:
            index: test
            sort: order
          dest:
            index: target

  - do:
      search:
        index: target
  - match: { hits.total: 1 }

  - do:
      search:
        index: target
        q: order:1
  - match: { hits.total: 1 }
@@ -0,0 +1,185 @@
# This test relies on setting version: 2, version_type: external on the source
# of the reindex and then manipulates the versioning in the destination.
# ReindexVersioningTests is a more thorough, Java-based version of these tests.

---
"versioning defaults to overwrite":
  - do:
      index:
        index: src
        type: test
        id: 1
        body: { "company": "cat" }
        version: 2
        version_type: external
  - do:
      index:
        index: src
        type: test
        id: 2
        body: { "company": "cow" }
  - do:
      index:
        index: dest
        type: test
        id: 1
        body: { "company": "dog" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: src
          dest:
            index: dest

  - do:
      search:
        index: dest
        q: company:cat
  - match: { hits.total: 1 }
  - do:
      search:
        index: dest
        q: company:cow
  - match: { hits.total: 1 }

---
"op_type can be set to create":
  - do:
      index:
        index: src
        type: test
        id: 1
        body: { "company": "cat" }
        version: 2
        version_type: external
  - do:
      index:
        index: src
        type: test
        id: 2
        body: { "company": "cow" }
  - do:
      index:
        index: dest
        type: test
        id: 1
        body: { "company": "dog" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          conflicts: proceed
          source:
            index: src
          dest:
            index: dest
            op_type: create

  - do:
      search:
        index: dest
        q: company:dog
  - match: { hits.total: 1 }
  - do:
      search:
        index: dest
        q: company:cow
  - match: { hits.total: 1 }

---
"version_type=external has refresh semantics":
  - do:
      index:
        index: src
        type: test
        id: 1
        body: { "company": "cat" }
        version: 2
        version_type: external
  - do:
      index:
        index: src
        type: test
        id: 2
        body: { "company": "cow" }
  - do:
      index:
        index: dest
        type: test
        id: 1
        body: { "company": "dog" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: src
          dest:
            index: dest
            version_type: external

  - do:
      search:
        index: dest
        q: company:cat
  - match: { hits.total: 1 }
  - do:
      search:
        index: dest
        q: company:cow
  - match: { hits.total: 1 }

---
"version_type=internal has overwrite semantics":
  - do:
      index:
        index: src
        type: test
        id: 1
        body: { "company": "cat" }
  - do:
      index:
        index: src
        type: test
        id: 2
        body: { "company": "cow" }
  - do:
      index:
        index: dest
        type: test
        id: 1
        body: { "company": "dog" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: src
          dest:
            index: dest
            version_type: internal

  - do:
      search:
        index: dest
        q: company:cat
  - match: { hits.total: 1 }
  - do:
      search:
        index: dest
        q: company:cow
  - match: { hits.total: 1 }
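Note: the behavior these tests pin down mirrors the plain index API. As a minimal Java sketch of external versioning (hypothetical client variable; same semantics the first and third tests rely on):

// With version_type=external the write wins only when the supplied version is
// higher than whatever the destination already holds; otherwise it conflicts.
client.prepareIndex("dest", "test", "1")
        .setSource("company", "cat")
        .setVersion(2)
        .setVersionType(VersionType.EXTERNAL)
        .get();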
@@ -0,0 +1,57 @@
---
"Set routing":
  - do:
      index:
        index: src
        type: test
        id: 1
        body: { "company": "cat" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: src
          dest:
            index: dest
            routing: =cat

  - do:
      get:
        index: dest
        type: test
        id: 1
        routing: cat
  - match: { _routing: cat }

---
"Discard routing":
  - do:
      index:
        index: src
        type: test
        id: 1
        body: { "company": "cat" }
        routing:
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: src
          dest:
            index: dest
            routing: discard

  - do:
      get:
        index: dest
        type: test
        id: 1
  - is_false: _routing
@@ -0,0 +1,50 @@
---
"can override consistency":
  - do:
      indices.create:
        index: dest
        body:
          settings:
            number_of_replicas: 5
  - do:
      cluster.health:
        wait_for_status: yellow
  - do:
      index:
        index: src
        type: test
        id: 1
        body: {"text": "test"}
        consistency: one
  - do:
      indices.refresh: {}

  - do:
      catch: unavailable
      reindex:
        timeout: 1s
        body:
          source:
            index: src
          dest:
            index: dest
  - match:
      failures.0.cause.reason: /Not.enough.active.copies.to.meet.write.consistency.of.\[QUORUM\].\(have.1,.needed.4\)\..Timeout\:.\[1s\],.request:.\[BulkShardRequest.to.\[dest\].containing.\[1\].requests\]/

  - do:
      reindex:
        consistency: one
        body:
          source:
            index: src
          dest:
            index: dest
  - match: {failures: []}
  - match: {created: 1}
  - match: {version_conflicts: 0}

  - do:
      get:
        index: dest
        type: test
        id: 1
@@ -0,0 +1,212 @@
---
"Basic response":
  - do:
      index:
        index: test
        type: foo
        id: 1
        body: { "text": "test" }
  - do:
      indices.refresh: {}

  - do:
      update-by-query:
        index: test
  - match: {updated: 1}
  - match: {version_conflicts: 0}
  - match: {batches: 1}
  - match: {failures: []}
  - match: {noops: 0}
  - is_true: took
  - is_false: created # Update by query can't create
  - is_false: task

---
"wait_for_completion=false":
  - do:
      index:
        index: test
        type: foo
        id: 1
        body: { "text": "test" }
  - do:
      indices.refresh: {}

  - do:
      update-by-query:
        wait_for_completion: false
        index: test
  - match: {task: '/.+:\d+/'}
  - is_false: updated
  - is_false: version_conflicts
  - is_false: batches
  - is_false: failures
  - is_false: noops
  - is_false: took
  - is_false: created

---
"Response for version conflict":
  - do:
      indices.create:
        index: test
        body:
          settings:
            index.refresh_interval: -1
  - do:
      index:
        index: test
        type: foo
        id: 1
        body: { "text": "test" }
  - do:
      indices.refresh: {}
  - do: # Creates a new version for reindex to miss on scan.
      index:
        index: test
        type: foo
        id: 1
        body: { "text": "test2" }

  - do:
      catch: conflict
      update-by-query:
        index: test
  - match: {updated: 0}
  - match: {version_conflicts: 1}
  - match: {batches: 1}
  - match: {failures.0.index: test}
  - match: {failures.0.type: foo}
  - match: {failures.0.id: "1"}
  - match: {failures.0.status: 409}
  - match: {failures.0.cause.type: version_conflict_engine_exception}
  - match: {failures.0.cause.reason: "[foo][1]: version conflict, current version [2] is different than the one provided [1]"}
  - match: {failures.0.cause.shard: /\d+/}
  - match: {failures.0.cause.index: test}
  - is_true: took

---
"Response for version conflict with conflicts=proceed":
  - do:
      indices.create:
        index: test
        body:
          settings:
            index.refresh_interval: -1
  - do:
      index:
        index: test
        type: foo
        id: 1
        body: { "text": "test" }
  - do:
      indices.refresh: {}
  - do: # Creates a new version for reindex to miss on scan.
      index:
        index: test
        type: foo
        id: 1
        body: { "text": "test2" }

  - do:
      update-by-query:
        index: test
        conflicts: proceed
  - match: {updated: 0}
  - match: {version_conflicts: 1}
  - match: {batches: 1}
  - match: {noops: 0}
  - match: {failures: []}
  - is_true: took

---
"Limit by query":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      index:
        index: twitter
        type: tweet
        id: 2
        body: { "user": "junk" }
  - do:
      indices.refresh: {}

  - do:
      update-by-query:
        index: twitter
        body:
          query:
            match:
              user: kimchy
  - match: {updated: 1}
  - match: {version_conflicts: 0}
  - match: {batches: 1}
  - match: {failures: []}
  - is_true: took

---
"Limit by size":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      index:
        index: twitter
        type: tweet
        id: 2
        body: { "user": "kimchy" }
  - do:
      indices.refresh: {}

  - do:
      update-by-query:
        index: twitter
        size: 1
  - match: {updated: 1}
  - match: {version_conflicts: 0}
  - match: {batches: 1}
  - match: {failures: []}
  - is_true: took

---
"Can override scroll_size":
  - do:
      indices.create:
        index: test
        body:
          settings:
            number_of_shards: 1
  - do:
      cluster.health:
        wait_for_status: yellow
  - do:
      index:
        index: test
        type: foo
        body: { "text": "test" }
  - do:
      index:
        index: test
        type: foo
        body: { "text": "test" }
  - do:
      index:
        index: test
        type: foo
        body: { "text": "test" }
  - do:
      indices.refresh: {}

  - do:
      update-by-query:
        index: test
        scroll_size: 1
  - match: {batches: 3}
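Note: conflicts: proceed in these REST tests corresponds to abortOnVersionConflict(false) on the Java request builder, which the concurrency test earlier in this change already uses. A rough sketch of the same request through the test helpers:

// Version conflicts are counted in the response instead of failing the request.
BulkIndexByScrollResponse response = request() // UpdateByQueryRequestBuilder from the test base class
        .source("test")
        .abortOnVersionConflict(false)
        .get();
logger.info("saw {} version conflicts", response.getStatus().getVersionConflicts());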
@@ -0,0 +1,41 @@
---
"invalid conflicts fails":
  - do:
      index:
        index: test
        type: test
        id: 1
        body: { "text": "test" }
  - do:
      catch: /conflicts may only be .* but was \[cat\]/
      update-by-query:
        index: test
        conflicts: cat

---
"invalid scroll_size fails":
  - do:
      index:
        index: test
        type: test
        id: 1
        body: { "text": "test" }
  - do:
      catch: /Failed to parse int parameter \[scroll_size\] with value \[cat\]/
      update-by-query:
        index: test
        scroll_size: cat

---
"invalid size fails":
  - do:
      index:
        index: test
        type: test
        id: 1
        body: { "text": "test" }
  - do:
      catch: /size should be greater than 0 if the request is limited to some number of documents or -1 if it isn't but it was \[-4\]/
      update-by-query:
        index: test
        size: -4
@@ -0,0 +1,58 @@
---
"Update-by-query picks up new fields":
  - do:
      indices.create:
        index: test
        body:
          mappings:
            place:
              properties:
                name:
                  type: string
  - do:
      cluster.health:
        wait_for_status: yellow
  - do:
      index:
        index: test
        type: place
        id: 1
        refresh: true
        body: { "name": "bob's house" }

  - do:
      indices.put_mapping:
        index: test
        type: place
        body:
          properties:
            name:
              type: string
              fields:
                english:
                  type: string
                  analyzer: english

  - do:
      search:
        index: test
        body:
          query:
            match:
              name.english: bob
  - match: { hits.total: 0 }

  - do:
      update-by-query:
        index: test
  - do:
      indices.refresh: {}

  - do:
      search:
        index: test
        body:
          query:
            match:
              name.english: bob
  - match: { hits.total: 1 }
@@ -0,0 +1,23 @@
---
"update-by-query increments the version number":
  - do:
      index:
        index: test
        type: test
        id: 1
        body: {"text": "test"}
  - do:
      indices.refresh: {}

  - do:
      update-by-query:
        index: test
  - match: {updated: 1}
  - match: {version_conflicts: 0}

  - do:
      get:
        index: test
        type: test
        id: 1
  - match: {_version: 2}
@@ -0,0 +1,42 @@
---
"can override consistency":
  - do:
      indices.create:
        index: test
        body:
          settings:
            number_of_replicas: 5
  - do:
      cluster.health:
        wait_for_status: yellow
  - do:
      index:
        index: test
        type: test
        id: 1
        body: {"text": "test"}
        consistency: one
  - do:
      indices.refresh: {}

  - do:
      catch: unavailable
      update-by-query:
        index: test
        timeout: 1s
  - match:
      failures.0.cause.reason: /Not.enough.active.copies.to.meet.write.consistency.of.\[QUORUM\].\(have.1,.needed.4\)..Timeout\:.\[1s\],.request:.\[BulkShardRequest.to.\[test\].containing.\[1\].requests\]/

  - do:
      update-by-query:
        index: test
        consistency: one
  - match: {failures: []}
  - match: {updated: 1}
  - match: {version_conflicts: 0}

  - do:
      get:
        index: test
        type: test
        id: 1
@@ -0,0 +1,26 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

apply plugin: 'elasticsearch.rest-test'

integTest {
    cluster {
        systemProperty 'es.script.inline', 'true'
    }
}
@@ -0,0 +1,39 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.smoketest;

import com.carrotsearch.randomizedtesting.annotations.Name;
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.elasticsearch.test.rest.RestTestCandidate;
import org.elasticsearch.test.rest.parser.RestTestParseException;

import java.io.IOException;

public class SmokeTestReindexWithGroovyIT extends ESRestTestCase {
    public SmokeTestReindexWithGroovyIT(@Name("yaml") RestTestCandidate testCandidate) {
        super(testCandidate);
    }

    @ParametersFactory
    public static Iterable<Object[]> parameters() throws IOException, RestTestParseException {
        return ESRestTestCase.createParameters(0, 1);
    }
}
@ -0,0 +1,397 @@
|
|||
---
"Modify a document":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: twitter
          dest:
            index: new_twitter
          script:
            inline: ctx._source.user = "other" + ctx._source.user
  - match: {created: 1}
  - match: {noops: 0}

  - do:
      search:
        index: new_twitter
        body:
          query:
            match:
              user: otherkimchy
  - match: { hits.total: 1 }

---
"Modify a document based on id":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      index:
        index: twitter
        type: tweet
        id: 2
        body: { "user": "blort" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: twitter
          dest:
            index: new_twitter
          script:
            inline: if (ctx._id == "1") {ctx._source.user = "other" + ctx._source.user}
  - match: {created: 2}
  - match: {noops: 0}

  - do:
      search:
        index: new_twitter
        body:
          query:
            match:
              user: otherkimchy
  - match: { hits.total: 1 }

  - do:
      search:
        index: new_twitter
        body:
          query:
            match:
              user: blort
  - match: { hits.total: 1 }

---
"Add new parent":
  - do:
      indices.create:
        index: new_twitter
        body:
          mappings:
            tweet:
              _parent: { type: "user" }
  - do:
      cluster.health:
        wait_for_status: yellow

  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      index:
        index: new_twitter
        type: user
        id: kimchy
        body: { "name": "kimchy" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: twitter
          dest:
            index: new_twitter
          script:
            inline: ctx._parent = ctx._source.user
  - match: {created: 1}
  - match: {noops: 0}

  - do:
      search:
        index: new_twitter
        body:
          query:
            has_parent:
              parent_type: user
              query:
                match:
                  name: kimchy
  - match: { hits.total: 1 }
  - match: { hits.hits.0._source.user: kimchy }

---
"Add routing":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      index:
        index: twitter
        type: tweet
        id: 2
        body: { "user": "foo" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: twitter
          dest:
            index: new_twitter
          script:
            inline: ctx._routing = ctx._source.user
  - match: {created: 2}
  - match: {noops: 0}

  - do:
      get:
        index: new_twitter
        type: tweet
        id: 1
        routing: kimchy
  - match: { _routing: kimchy }

  - do:
      get:
        index: new_twitter
        type: tweet
        id: 2
        routing: foo
  - match: { _routing: foo }

---
"Add routing and parent":
  - do:
      indices.create:
        index: new_twitter
        body:
          mappings:
            tweet:
              _parent: { type: "user" }
  - do:
      cluster.health:
        wait_for_status: yellow

  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      index:
        index: new_twitter
        type: user
        id: kimchy
        body: { "name": "kimchy" }
        routing: cat
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: twitter
          dest:
            index: new_twitter
          script:
            inline: ctx._parent = ctx._source.user; ctx._routing = "cat"
  - match: {created: 1}
  - match: {noops: 0}

  - do:
      search:
        index: new_twitter
        routing: cat
        body:
          query:
            has_parent:
              parent_type: user
              query:
                match:
                  name: kimchy
  - match: { hits.total: 1 }
  - match: { hits.hits.0._source.user: kimchy }
  - match: { hits.hits.0._routing: cat }

---
"Noop one doc":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      index:
        index: twitter
        type: tweet
        id: 2
        body: { "user": "foo" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: twitter
          dest:
            index: new_twitter
          script:
            inline: if (ctx._source.user == "kimchy") {ctx._source.user = "not" + ctx._source.user} else {ctx.op = "noop"}
  - match: {created: 1}
  - match: {noops: 1}

  - do:
      search:
        index: new_twitter
        body:
          query:
            match:
              user: notkimchy
  - match: { hits.total: 1 }

  - do:
      search:
        index: twitter
        body:
          query:
            match:
              user: notfoo
  - match: { hits.total: 0 }

---
"Noop all docs":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      index:
        index: twitter
        type: tweet
        id: 2
        body: { "user": "foo" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        body:
          source:
            index: twitter
          dest:
            index: new_twitter
          script:
            inline: ctx.op = "noop"
  - match: {updated: 0}
  - match: {noops: 2}

---
"Set version to null to force an update":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        version: 1
        version_type: external
        body: { "user": "kimchy" }
  - do:
      index:
        index: new_twitter
        type: tweet
        id: 1
        version: 1
        version_type: external
        body: { "user": "kimchy" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: twitter
          dest:
            index: new_twitter
            version_type: external
          script:
            inline: ctx._source.user = "other" + ctx._source.user; ctx._version = null
  - match: {updated: 1}
  - match: {noops: 0}

  - do:
      search:
        index: new_twitter
        body:
          query:
            match:
              user: otherkimchy
  - match: { hits.total: 1 }

---
"Set id to null to get an automatic id":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      index:
        index: new_twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: twitter
          dest:
            index: new_twitter
          script:
            inline: ctx._source.user = "other" + ctx._source.user; ctx._id = null
  - match: {created: 1}
  - match: {noops: 0}

  - do:
      search:
        index: new_twitter
        body:
          query:
            match:
              user: otherkimchy
  - match: { hits.total: 1 }
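For orientation: the first scenario above exercises the plain REST form of the call below, a sketch built from the index names in the tests and the `_reindex` endpoint and `refresh` parameter defined in the reindex API spec later in this commit.

POST /_reindex?refresh=true
{
  "source": { "index": "twitter" },
  "dest": { "index": "new_twitter" },
  "script": { "inline": "ctx._source.user = \"other\" + ctx._source.user" }
}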
@@ -0,0 +1,23 @@
---
"Totally broken scripts report the error properly":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      indices.refresh: {}

  - do:
      catch: request
      reindex:
        refresh: true
        body:
          source:
            index: twitter
          dest:
            index: new_twitter
          script:
            inline: syntax errors are fun!
  - match: {error.reason: 'Failed to compile inline script [syntax errors are fun!] using lang [groovy]'}
@@ -0,0 +1,140 @@
---
"Update a document using update-by-query":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      indices.refresh: {}

  - do:
      update-by-query:
        index: twitter
        refresh: true
        body:
          script:
            inline: ctx._source.user = "not" + ctx._source.user
  - match: {updated: 1}
  - match: {noops: 0}

  - do:
      search:
        index: twitter
        body:
          query:
            match:
              user: notkimchy
  - match: { hits.total: 1 }

---
"Noop one doc":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      index:
        index: twitter
        type: tweet
        id: 2
        body: { "user": "foo" }
  - do:
      indices.refresh: {}

  - do:
      update-by-query:
        refresh: true
        index: twitter
        body:
          script:
            inline: if (ctx._source.user == "kimchy") {ctx._source.user = "not" + ctx._source.user} else {ctx.op = "noop"}
  - match: {updated: 1}
  - match: {noops: 1}

  - do:
      search:
        index: twitter
        body:
          query:
            match:
              user: notkimchy
  - match: { hits.total: 1 }

  - do:
      search:
        index: twitter
        body:
          query:
            match:
              user: notfoo
  - match: { hits.total: 0 }

---
"Noop all docs":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      index:
        index: twitter
        type: tweet
        id: 2
        body: { "user": "foo" }
  - do:
      indices.refresh: {}

  - do:
      update-by-query:
        refresh: true
        index: twitter
        body:
          script:
            inline: ctx.op = "noop"
  - match: {updated: 0}
  - match: {noops: 2}
  - match: {batches: 1}

---
"Setting bogus ctx is an error":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      indices.refresh: {}

  - do:
      catch: /Invalid fields added to ctx \[junk\]/
      update-by-query:
        index: twitter
        body:
          script:
            inline: ctx.junk = "stuff"

---
"Can't change _id":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      indices.refresh: {}

  - do:
      catch: /Modifying \[_id\] not allowed/
      update-by-query:
        index: twitter
        body:
          script:
            inline: ctx._id = "stuff"
@@ -0,0 +1,20 @@
---
"Totally broken scripts report the error properly":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      indices.refresh: {}

  - do:
      catch: request
      update-by-query:
        index: twitter
        refresh: true
        body:
          script:
            inline: syntax errors are fun!
  - match: {error.reason: 'Failed to compile inline script [syntax errors are fun!] using lang [groovy]'}
@@ -0,0 +1,35 @@
{
  "reindex": {
    "documentation": "https://www.elastic.co/guide/en/elasticsearch/plugins/master/plugins-reindex.html",
    "methods": ["POST"],
    "url": {
      "path": "/_reindex",
      "paths": ["/_reindex"],
      "parts": {},
      "params": {
        "refresh": {
          "type" : "boolean",
          "description" : "Should the affected indices be refreshed?"
        },
        "timeout": {
          "type" : "time",
          "default": "1m",
          "description" : "Time each individual bulk request should wait for shards that are unavailable."
        },
        "consistency": {
          "type" : "enum",
          "options" : ["one", "quorum", "all"],
          "description" : "Explicit write consistency setting for the operation"
        },
        "wait_for_completion": {
          "type" : "boolean",
          "default": false,
          "description" : "Should the request block until the reindex is complete?"
        }
      }
    },
    "body": {
      "description": "The search definition using the Query DSL and the prototype for the index request."
    }
  }
}
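Taken together, a request combining several of these parameters might look like the sketch below (index names are illustrative; per the spec, `wait_for_completion` defaults to false, so passing true makes the call block until the reindex finishes):

POST /_reindex?refresh=true&wait_for_completion=true&timeout=2m
{
  "source": { "index": "twitter" },
  "dest": { "index": "new_twitter" }
}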
@@ -0,0 +1,200 @@
{
  "update-by-query": {
    "documentation": "https://www.elastic.co/guide/en/elasticsearch/plugins/master/plugins-reindex.html",
    "methods": ["POST"],
    "url": {
      "path": "/{index}/_update_by_query",
      "paths": ["/{index}/_update_by_query", "/{index}/{type}/_update_by_query"],
      "comment": "most things below this are just copied from search.json",
      "parts": {
        "index": {
          "type" : "list",
          "description" : "A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices"
        },
        "type": {
          "type" : "list",
          "description" : "A comma-separated list of document types to search; leave empty to perform the operation on all types"
        }
      },
      "params": {
        "analyzer": {
          "type" : "string",
          "description" : "The analyzer to use for the query string"
        },
        "analyze_wildcard": {
          "type" : "boolean",
          "description" : "Specify whether wildcard and prefix queries should be analyzed (default: false)"
        },
        "default_operator": {
          "type" : "enum",
          "options" : ["AND","OR"],
          "default" : "OR",
          "description" : "The default operator for query string query (AND or OR)"
        },
        "df": {
          "type" : "string",
          "description" : "The field to use as default where no field prefix is given in the query string"
        },
        "explain": {
          "type" : "boolean",
          "description" : "Specify whether to return detailed information about score computation as part of a hit"
        },
        "fields": {
          "type" : "list",
          "description" : "A comma-separated list of fields to return as part of a hit"
        },
        "fielddata_fields": {
          "type" : "list",
          "description" : "A comma-separated list of fields to return as the field data representation of a field for each hit"
        },
        "from": {
          "type" : "number",
          "description" : "Starting offset (default: 0)"
        },
        "ignore_unavailable": {
          "type" : "boolean",
          "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
        },
        "allow_no_indices": {
          "type" : "boolean",
          "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
        },
        "conflicts": {
          "note": "This is not copied from search",
          "type" : "enum",
          "options": ["abort", "proceed"],
          "default": "abort",
          "description" : "What to do when the update-by-query hits version conflicts"
        },
        "expand_wildcards": {
          "type" : "enum",
          "options" : ["open","closed","none","all"],
          "default" : "open",
          "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
        },
        "lenient": {
          "type" : "boolean",
          "description" : "Specify whether format-based query failures (such as providing text to a numeric field) should be ignored"
        },
        "lowercase_expanded_terms": {
          "type" : "boolean",
          "description" : "Specify whether query terms should be lowercased"
        },
        "preference": {
          "type" : "string",
          "description" : "Specify the node or shard the operation should be performed on (default: random)"
        },
        "q": {
          "type" : "string",
          "description" : "Query in the Lucene query string syntax"
        },
        "routing": {
          "type" : "list",
          "description" : "A comma-separated list of specific routing values"
        },
        "scroll": {
          "type" : "duration",
          "description" : "Specify how long a consistent view of the index should be maintained for scrolled search"
        },
        "search_type": {
          "type" : "enum",
          "options" : ["query_then_fetch", "dfs_query_then_fetch"],
          "description" : "Search operation type"
        },
        "size": {
          "type" : "number",
          "description" : "Number of hits to return (default: 10)"
        },
        "sort": {
          "type" : "list",
          "description" : "A comma-separated list of <field>:<direction> pairs"
        },
        "_source": {
          "type" : "list",
          "description" : "True or false to return the _source field or not, or a list of fields to return"
        },
        "_source_exclude": {
          "type" : "list",
          "description" : "A list of fields to exclude from the returned _source field"
        },
        "_source_include": {
          "type" : "list",
          "description" : "A list of fields to extract and return from the _source field"
        },
        "terminate_after": {
          "type" : "number",
          "description" : "The maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early."
        },
        "stats": {
          "type" : "list",
          "description" : "Specific 'tag' of the request for logging and statistical purposes"
        },
        "suggest_field": {
          "type" : "string",
          "description" : "Specify which field to use for suggestions"
        },
        "suggest_mode": {
          "type" : "enum",
          "options" : ["missing", "popular", "always"],
          "default" : "missing",
          "description" : "Specify suggest mode"
        },
        "suggest_size": {
          "type" : "number",
          "description" : "How many suggestions to return in response"
        },
        "suggest_text": {
          "type" : "text",
          "description" : "The source text for which the suggestions should be returned"
        },
        "timeout": {
          "type" : "time",
          "description" : "Explicit operation timeout"
        },
        "track_scores": {
          "type" : "boolean",
          "description": "Whether to calculate and return scores even if they are not used for sorting"
        },
        "version": {
          "type" : "boolean",
          "description" : "Specify whether to return document version as part of a hit"
        },
        "version_type": {
          "type" : "boolean",
          "description" : "Should the document increment the version number (internal) on hit or not (reindex)"
        },
        "request_cache": {
          "type" : "boolean",
          "description" : "Specify if request cache should be used for this request or not, defaults to index level setting"
        },
        "refresh": {
          "type" : "boolean",
          "description" : "Should the affected indices be refreshed?"
        },
        "timeout": {
          "type" : "time",
          "default": "1m",
          "description" : "Time each individual bulk request should wait for shards that are unavailable."
        },
        "consistency": {
          "type" : "enum",
          "options" : ["one", "quorum", "all"],
          "description" : "Explicit write consistency setting for the operation"
        },
        "scroll_size": {
          "type": "integer",
          "default": 100,
          "description": "Size of the scroll request powering the update-by-query"
        },
        "wait_for_completion": {
          "type" : "boolean",
          "default": false,
          "description" : "Should the request block until the update-by-query is complete?"
        }
      }
    },
    "body": {
      "description": "The search definition using the Query DSL"
    }
  }
}
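As a sketch of how the pieces above fit together (path from `paths`, `conflicts` and `refresh` from the params, script body as in the YAML tests earlier in this commit; the index name is illustrative):

POST /twitter/_update_by_query?refresh=true&conflicts=proceed
{
  "script": { "inline": "ctx._source.user = \"not\" + ctx._source.user" }
}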
@@ -16,6 +16,7 @@ List projects = [
  'modules:lang-groovy',
  'modules:lang-mustache',
  'modules:lang-painless',
+ 'modules:reindex',
  'plugins:analysis-icu',
  'plugins:analysis-kuromoji',
  'plugins:analysis-phonetic',
@@ -40,6 +41,7 @@ List projects = [
  'qa:evil-tests',
  'qa:smoke-test-client',
  'qa:smoke-test-multinode',
+ 'qa:smoke-test-reindex-with-groovy',
  'qa:smoke-test-plugins',
  'qa:smoke-test-ingest-with-all-dependencies',
  'qa:smoke-test-ingest-disabled',
@@ -89,4 +91,3 @@ if (xplugins.exists()) {
    addSubProjects(':x-plugins', extraPluginDir)
  }
}
@@ -17,7 +17,7 @@
 * under the License.
 */

-package org.elasticsearch.rest;
+package org.elasticsearch.test.client;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.Action;
@@ -38,7 +38,10 @@ public class NoOpClient extends AbstractClient {
    }

    @Override
-   protected <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
+   protected <Request extends ActionRequest<Request>,
+           Response extends ActionResponse,
+           RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>>
+           void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
        listener.onResponse(null);
    }
@@ -140,6 +140,7 @@ public class DoSection implements ExecutableSection {
catches.put("conflict", tuple("409", equalTo(409)));
|
||||
catches.put("forbidden", tuple("403", equalTo(403)));
|
||||
catches.put("request_timeout", tuple("408", equalTo(408)));
|
||||
catches.put("unavailable", tuple("503", equalTo(503)));
|
||||
catches.put("request", tuple("4xx|5xx", allOf(greaterThanOrEqualTo(400), not(equalTo(404)), not(equalTo(408)), not(equalTo(409)), not(equalTo(403)))));
|
||||
}
|
||||
}
|
||||
|
|
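The new `request` catch accepts any 4xx or 5xx status except those already covered by a more specific catch (403, 404, 408, 409); the script-error tests above rely on it via `catch: request`.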