Basic reindex and update_by_query
This creates a reindex plugin with a very basic implementation that is much like delete-by-query. Next we'll integrate it with the task management work but for now this works.
parent f8026ed8fd
commit 85797aeb50
@@ -19,6 +19,7 @@
 
 package org.elasticsearch.action.bulk;
 
+import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.ActionWriteResponse;
 import org.elasticsearch.action.delete.DeleteResponse;
@@ -27,6 +28,9 @@ import org.elasticsearch.action.update.UpdateResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.rest.RestStatus;
 
 import java.io.IOException;
@@ -40,7 +44,15 @@ public class BulkItemResponse implements Streamable {
     /**
      * Represents a failure.
      */
-    public static class Failure {
+    public static class Failure implements Writeable<Failure>, ToXContent {
+        static final String INDEX_FIELD = "index";
+        static final String TYPE_FIELD = "type";
+        static final String ID_FIELD = "id";
+        static final String CAUSE_FIELD = "cause";
+        static final String STATUS_FIELD = "status";
+
+        public static final Failure PROTOTYPE = new Failure(null, null, null, null);
+
         private final String index;
         private final String type;
         private final String id;
@@ -90,9 +102,39 @@ public class BulkItemResponse implements Streamable {
             return this.status;
         }
 
         /**
          * The actual cause of the failure.
          */
         public Throwable getCause() {
             return cause;
         }
 
+        @Override
+        public Failure readFrom(StreamInput in) throws IOException {
+            return new Failure(in.readString(), in.readString(), in.readOptionalString(), in.readThrowable());
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            out.writeString(getIndex());
+            out.writeString(getType());
+            out.writeOptionalString(getId());
+            out.writeThrowable(getCause());
+        }
+
+        @Override
+        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+            builder.field(INDEX_FIELD, index);
+            builder.field(TYPE_FIELD, type);
+            if (id != null) {
+                builder.field(ID_FIELD, id);
+            }
+            builder.startObject(CAUSE_FIELD);
+            ElasticsearchException.toXContent(builder, params, cause);
+            builder.endObject();
+            builder.field(STATUS_FIELD, status.getStatus());
+            return builder;
+        }
     }
 
     private int id;
@@ -257,11 +299,7 @@
         }
 
         if (in.readBoolean()) {
-            String fIndex = in.readString();
-            String fType = in.readString();
-            String fId = in.readOptionalString();
-            Throwable throwable = in.readThrowable();
-            failure = new Failure(fIndex, fType, fId, throwable);
+            failure = Failure.PROTOTYPE.readFrom(in);
         }
     }
 
@@ -286,10 +324,7 @@
             out.writeBoolean(false);
         } else {
             out.writeBoolean(true);
-            out.writeString(failure.getIndex());
-            out.writeString(failure.getType());
-            out.writeOptionalString(failure.getId());
-            out.writeThrowable(failure.getCause());
+            failure.writeTo(out);
         }
     }
 }
@@ -56,6 +56,17 @@ public class BulkShardRequest extends ReplicationRequest<BulkShardRequest> {
         return items;
     }
 
+    @Override
+    public String toString() {
+        // This is included in error messages so we'll try to make it somewhat user friendly.
+        StringBuilder b = new StringBuilder("BulkShardRequest to [");
+        b.append(index).append("] containing [").append(items.length).append("] requests");
+        if (refresh) {
+            b.append(" and a refresh");
+        }
+        return b.toString();
+    }
+
     @Override
     public String[] indices() {
         List<String> indices = new ArrayList<>();
@@ -22,7 +22,11 @@ package org.elasticsearch.action.index;
 import org.elasticsearch.ElasticsearchGenerationException;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.Version;
-import org.elasticsearch.action.*;
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.DocumentRequest;
+import org.elasticsearch.action.RoutingMissingException;
+import org.elasticsearch.action.TimestampParsingException;
 import org.elasticsearch.action.support.replication.ReplicationRequest;
 import org.elasticsearch.client.Requests;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
@@ -35,7 +39,11 @@ import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.lucene.uid.Versions;
-import org.elasticsearch.common.xcontent.*;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.IndexNotFoundException;
 import org.elasticsearch.index.VersionType;
 import org.elasticsearch.index.mapper.MapperParsingException;
@@ -660,7 +668,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
     @Override
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
-        type = in.readString();
+        type = in.readOptionalString();
         id = in.readOptionalString();
         routing = in.readOptionalString();
         parent = in.readOptionalString();
@@ -677,7 +685,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
-        out.writeString(type);
+        out.writeOptionalString(type);
         out.writeOptionalString(id);
         out.writeOptionalString(routing);
         out.writeOptionalString(parent);
@@ -20,7 +20,11 @@
 package org.elasticsearch.plugins;
 
 import org.apache.lucene.util.IOUtils;
-import org.elasticsearch.*;
+import org.elasticsearch.Build;
+import org.elasticsearch.ElasticsearchCorruptionException;
+import org.elasticsearch.ElasticsearchTimeoutException;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.Version;
 import org.elasticsearch.bootstrap.JarHell;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.cli.Terminal;
@@ -35,9 +39,24 @@ import java.io.IOException;
 import java.io.OutputStream;
 import java.net.MalformedURLException;
 import java.net.URL;
-import java.nio.file.*;
-import java.nio.file.attribute.*;
-import java.util.*;
+import java.nio.file.DirectoryStream;
+import java.nio.file.FileVisitResult;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.SimpleFileVisitor;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.nio.file.attribute.GroupPrincipal;
+import java.nio.file.attribute.PosixFileAttributeView;
+import java.nio.file.attribute.PosixFileAttributes;
+import java.nio.file.attribute.PosixFilePermission;
+import java.nio.file.attribute.UserPrincipal;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Locale;
+import java.util.Random;
+import java.util.Set;
 import java.util.stream.StreamSupport;
 import java.util.zip.ZipEntry;
 import java.util.zip.ZipInputStream;
@@ -85,6 +104,7 @@ public class PluginManager {
             "mapper-attachments",
             "mapper-murmur3",
             "mapper-size",
+            "reindex",
             "repository-azure",
             "repository-s3",
             "store-smb"));
@@ -72,7 +72,8 @@ public class RestPutWarmerAction extends BaseRestHandler {
         PutWarmerRequest putWarmerRequest = new PutWarmerRequest(request.param("name"));
 
         BytesReference sourceBytes = RestActions.getRestContent(request);
-        SearchSourceBuilder source = RestActions.getRestSearchSource(sourceBytes, queryRegistry, parseFieldMatcher);
+        SearchSourceBuilder source = new SearchSourceBuilder();
+        RestActions.parseRestSearchSource(source, sourceBytes, queryRegistry, parseFieldMatcher);
         SearchRequest searchRequest = new SearchRequest(Strings.splitStringByCommaToArray(request.param("index")))
                 .types(Strings.splitStringByCommaToArray(request.param("type")))
                 .requestCache(request.paramAsBoolean("request_cache", null)).source(source);
@@ -83,20 +83,34 @@ public class RestSearchAction extends BaseRestHandler {
 
     @Override
     public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws IOException {
-        SearchRequest searchRequest;
-        searchRequest = RestSearchAction.parseSearchRequest(queryRegistry, request, parseFieldMatcher);
+        SearchRequest searchRequest = new SearchRequest();
+        RestSearchAction.parseSearchRequest(searchRequest, queryRegistry, request, parseFieldMatcher, null);
         client.search(searchRequest, new RestStatusToXContentListener<>(channel));
     }
 
-    public static SearchRequest parseSearchRequest(IndicesQueriesRegistry indicesQueriesRegistry, RestRequest request, ParseFieldMatcher parseFieldMatcher) throws IOException {
-        String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
-        SearchRequest searchRequest = new SearchRequest(indices);
+    /**
+     * Parses the rest request on top of the SearchRequest, preserving values
+     * that are not overridden by the rest request.
+     *
+     * @param restContent
+     *            override body content to use for the request. If null body
+     *            content is read from the request using
+     *            RestAction.hasBodyContent.
+     */
+    public static void parseSearchRequest(SearchRequest searchRequest, IndicesQueriesRegistry indicesQueriesRegistry, RestRequest request, ParseFieldMatcher parseFieldMatcher, BytesReference restContent) throws IOException {
+        if (searchRequest.source() == null) {
+            searchRequest.source(new SearchSourceBuilder());
+        }
+        searchRequest.indices(Strings.splitStringByCommaToArray(request.param("index")));
         // get the content, and put it in the body
         // add content/source as template if template flag is set
         boolean isTemplateRequest = request.path().endsWith("/template");
-        final SearchSourceBuilder builder;
-        if (RestActions.hasBodyContent(request)) {
-            BytesReference restContent = RestActions.getRestContent(request);
+        if (restContent == null) {
+            if (RestActions.hasBodyContent(request)) {
+                restContent = RestActions.getRestContent(request);
+            }
+        }
+        if (restContent != null) {
             QueryParseContext context = new QueryParseContext(indicesQueriesRegistry);
             if (isTemplateRequest) {
                 try (XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) {
@@ -105,12 +119,9 @@ public class RestSearchAction extends BaseRestHandler {
                     Template template = TemplateQueryParser.parse(parser, context.parseFieldMatcher(), "params", "template");
                     searchRequest.template(template);
                 }
-                builder = null;
             } else {
-                builder = RestActions.getRestSearchSource(restContent, indicesQueriesRegistry, parseFieldMatcher);
+                RestActions.parseRestSearchSource(searchRequest.source(), restContent, indicesQueriesRegistry, parseFieldMatcher);
             }
-        } else {
-            builder = null;
         }
 
         // do not allow 'query_and_fetch' or 'dfs_query_and_fetch' search types
@@ -123,15 +134,7 @@ public class RestSearchAction extends BaseRestHandler {
         } else {
             searchRequest.searchType(searchType);
         }
-        if (builder == null) {
-            SearchSourceBuilder extraBuilder = new SearchSourceBuilder();
-            if (parseSearchSource(extraBuilder, request)) {
-                searchRequest.source(extraBuilder);
-            }
-        } else {
-            parseSearchSource(builder, request);
-            searchRequest.source(builder);
-        }
+        parseSearchSource(searchRequest.source(), request);
         searchRequest.requestCache(request.paramAsBoolean("request_cache", null));
 
         String scroll = request.param("scroll");
@@ -143,41 +146,35 @@ public class RestSearchAction extends BaseRestHandler {
         searchRequest.routing(request.param("routing"));
         searchRequest.preference(request.param("preference"));
         searchRequest.indicesOptions(IndicesOptions.fromRequest(request, searchRequest.indicesOptions()));
-
-        return searchRequest;
     }
 
-    private static boolean parseSearchSource(final SearchSourceBuilder searchSourceBuilder, RestRequest request) {
-
-        boolean modified = false;
+    /**
+     * Parses the rest request on top of the SearchSourceBuilder, preserving
+     * values that are not overridden by the rest request.
+     */
+    private static void parseSearchSource(final SearchSourceBuilder searchSourceBuilder, RestRequest request) {
         QueryBuilder<?> queryBuilder = RestActions.urlParamsToQueryBuilder(request);
         if (queryBuilder != null) {
             searchSourceBuilder.query(queryBuilder);
-            modified = true;
         }
 
         int from = request.paramAsInt("from", -1);
         if (from != -1) {
             searchSourceBuilder.from(from);
-            modified = true;
         }
         int size = request.paramAsInt("size", -1);
         if (size != -1) {
             searchSourceBuilder.size(size);
-            modified = true;
         }
 
         if (request.hasParam("explain")) {
             searchSourceBuilder.explain(request.paramAsBoolean("explain", null));
-            modified = true;
         }
         if (request.hasParam("version")) {
             searchSourceBuilder.version(request.paramAsBoolean("version", null));
-            modified = true;
         }
         if (request.hasParam("timeout")) {
             searchSourceBuilder.timeout(request.paramAsTime("timeout", null));
-            modified = true;
         }
         if (request.hasParam("terminate_after")) {
             int terminateAfter = request.paramAsInt("terminate_after",
@@ -186,7 +183,6 @@ public class RestSearchAction extends BaseRestHandler {
                 throw new IllegalArgumentException("terminateAfter must be > 0");
             } else if (terminateAfter > 0) {
                 searchSourceBuilder.terminateAfter(terminateAfter);
-                modified = true;
             }
         }
 
@@ -194,13 +190,11 @@ public class RestSearchAction extends BaseRestHandler {
         if (sField != null) {
             if (!Strings.hasText(sField)) {
                 searchSourceBuilder.noFields();
-                modified = true;
             } else {
                 String[] sFields = Strings.splitStringByCommaToArray(sField);
                 if (sFields != null) {
                     for (String field : sFields) {
                         searchSourceBuilder.field(field);
-                        modified = true;
                     }
                 }
             }
@@ -212,7 +206,6 @@ public class RestSearchAction extends BaseRestHandler {
             if (sFields != null) {
                 for (String field : sFields) {
                     searchSourceBuilder.fieldDataField(field);
-                    modified = true;
                 }
             }
         }
@@ -220,12 +213,10 @@ public class RestSearchAction extends BaseRestHandler {
         FetchSourceContext fetchSourceContext = FetchSourceContext.parseFromRestRequest(request);
         if (fetchSourceContext != null) {
             searchSourceBuilder.fetchSource(fetchSourceContext);
-            modified = true;
         }
 
         if (request.hasParam("track_scores")) {
             searchSourceBuilder.trackScores(request.paramAsBoolean("track_scores", false));
-            modified = true;
         }
 
         String sSorts = request.param("sort");
@@ -238,14 +229,11 @@ public class RestSearchAction extends BaseRestHandler {
                 String reverse = sort.substring(delimiter + 1);
                 if ("asc".equals(reverse)) {
                     searchSourceBuilder.sort(sortField, SortOrder.ASC);
-                    modified = true;
                 } else if ("desc".equals(reverse)) {
                     searchSourceBuilder.sort(sortField, SortOrder.DESC);
-                    modified = true;
                 }
             } else {
                 searchSourceBuilder.sort(sort);
-                modified = true;
             }
         }
     }
@@ -253,7 +241,6 @@ public class RestSearchAction extends BaseRestHandler {
         String sStats = request.param("stats");
         if (sStats != null) {
             searchSourceBuilder.stats(Arrays.asList(Strings.splitStringByCommaToArray(sStats)));
-            modified = true;
         }
 
         String suggestField = request.param("suggest_field");
@@ -263,8 +250,6 @@ public class RestSearchAction extends BaseRestHandler {
             String suggestMode = request.param("suggest_mode");
             searchSourceBuilder.suggest(new SuggestBuilder().addSuggestion(
                     termSuggestion(suggestField).field(suggestField).text(suggestText).size(suggestSize).suggestMode(suggestMode)));
-            modified = true;
         }
-        return modified;
     }
 }
@@ -27,8 +27,17 @@ import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.lucene.uid.Versions;
-import org.elasticsearch.common.xcontent.*;
-import org.elasticsearch.index.query.*;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.query.Operator;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.query.QueryStringQueryBuilder;
 import org.elasticsearch.indices.query.IndicesQueriesRegistry;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
@@ -104,14 +113,14 @@ public class RestActions {
         return queryBuilder;
     }
 
-    public static SearchSourceBuilder getRestSearchSource(BytesReference sourceBytes, IndicesQueriesRegistry queryRegistry,
+    public static void parseRestSearchSource(SearchSourceBuilder source, BytesReference sourceBytes, IndicesQueriesRegistry queryRegistry,
             ParseFieldMatcher parseFieldMatcher)
             throws IOException {
         XContentParser parser = XContentFactory.xContent(sourceBytes).createParser(sourceBytes);
         QueryParseContext queryParseContext = new QueryParseContext(queryRegistry);
         queryParseContext.reset(parser);
         queryParseContext.parseFieldMatcher(parseFieldMatcher);
-        return SearchSourceBuilder.parseSearchSource(parser, queryParseContext);
+        source.parseXContent(parser, queryParseContext);
     }
 
     /**
@@ -30,7 +30,7 @@ import org.elasticsearch.rest.RestStatus;
  * A REST based action listener that assumes the response is of type {@link ToXContent} and automatically
  * builds an XContent based response (wrapping the toXContent in startObject/endObject).
  */
-public final class RestToXContentListener<Response extends ToXContent> extends RestResponseListener<Response> {
+public class RestToXContentListener<Response extends ToXContent> extends RestResponseListener<Response> {
 
     public RestToXContentListener(RestChannel channel) {
         super(channel);
@@ -45,6 +45,10 @@ public final class RestToXContentListener<Response extends ToXContent> extends R
         builder.startObject();
         response.toXContent(builder, channel.request());
         builder.endObject();
-        return new BytesRestResponse(RestStatus.OK, builder);
+        return new BytesRestResponse(getStatus(response), builder);
     }
+
+    protected RestStatus getStatus(Response response) {
+        return RestStatus.OK;
+    }
 }
@@ -695,8 +695,19 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
         return ext;
     }
 
     /**
      * Create a new SearchSourceBuilder with attributes set by an xContent.
      */
     public SearchSourceBuilder fromXContent(XContentParser parser, QueryParseContext context) throws IOException {
         SearchSourceBuilder builder = new SearchSourceBuilder();
+        builder.parseXContent(parser, context);
+        return builder;
+    }
+
+    /**
+     * Parse some xContent into this SearchSourceBuilder, overwriting any values specified in the xContent.
+     */
+    public void parseXContent(XContentParser parser, QueryParseContext context) throws IOException {
+        XContentParser.Token token = parser.currentToken();
+        String currentFieldName = null;
+        if (token != XContentParser.Token.START_OBJECT && (token = parser.nextToken()) != XContentParser.Token.START_OBJECT) {
@@ -708,42 +719,42 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
                 currentFieldName = parser.currentName();
             } else if (token.isValue()) {
                 if (context.parseFieldMatcher().match(currentFieldName, FROM_FIELD)) {
-                    builder.from = parser.intValue();
+                    from = parser.intValue();
                 } else if (context.parseFieldMatcher().match(currentFieldName, SIZE_FIELD)) {
-                    builder.size = parser.intValue();
+                    size = parser.intValue();
                 } else if (context.parseFieldMatcher().match(currentFieldName, TIMEOUT_FIELD)) {
-                    builder.timeoutInMillis = parser.longValue();
+                    timeoutInMillis = parser.longValue();
                 } else if (context.parseFieldMatcher().match(currentFieldName, TERMINATE_AFTER_FIELD)) {
-                    builder.terminateAfter = parser.intValue();
+                    terminateAfter = parser.intValue();
                 } else if (context.parseFieldMatcher().match(currentFieldName, MIN_SCORE_FIELD)) {
-                    builder.minScore = parser.floatValue();
+                    minScore = parser.floatValue();
                 } else if (context.parseFieldMatcher().match(currentFieldName, VERSION_FIELD)) {
-                    builder.version = parser.booleanValue();
+                    version = parser.booleanValue();
                 } else if (context.parseFieldMatcher().match(currentFieldName, EXPLAIN_FIELD)) {
-                    builder.explain = parser.booleanValue();
+                    explain = parser.booleanValue();
                 } else if (context.parseFieldMatcher().match(currentFieldName, TRACK_SCORES_FIELD)) {
-                    builder.trackScores = parser.booleanValue();
+                    trackScores = parser.booleanValue();
                 } else if (context.parseFieldMatcher().match(currentFieldName, _SOURCE_FIELD)) {
-                    builder.fetchSourceContext = FetchSourceContext.parse(parser, context);
+                    fetchSourceContext = FetchSourceContext.parse(parser, context);
                 } else if (context.parseFieldMatcher().match(currentFieldName, FIELDS_FIELD)) {
-                    List<String> fieldNames = new ArrayList<>();
+                    fieldNames = new ArrayList<>();
                     fieldNames.add(parser.text());
-                    builder.fieldNames = fieldNames;
                 } else if (context.parseFieldMatcher().match(currentFieldName, SORT_FIELD)) {
-                    builder.sort(parser.text());
+                    sorts = new ArrayList<>();
+                    sort(parser.text());
                 } else {
                     throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].",
                             parser.getTokenLocation());
                 }
             } else if (token == XContentParser.Token.START_OBJECT) {
                 if (context.parseFieldMatcher().match(currentFieldName, QUERY_FIELD)) {
-                    builder.queryBuilder = context.parseInnerQueryBuilder();
+                    queryBuilder = context.parseInnerQueryBuilder();
                 } else if (context.parseFieldMatcher().match(currentFieldName, POST_FILTER_FIELD)) {
-                    builder.postQueryBuilder = context.parseInnerQueryBuilder();
+                    postQueryBuilder = context.parseInnerQueryBuilder();
                 } else if (context.parseFieldMatcher().match(currentFieldName, _SOURCE_FIELD)) {
-                    builder.fetchSourceContext = FetchSourceContext.parse(parser, context);
+                    fetchSourceContext = FetchSourceContext.parse(parser, context);
                 } else if (context.parseFieldMatcher().match(currentFieldName, SCRIPT_FIELDS_FIELD)) {
-                    List<ScriptField> scriptFields = new ArrayList<>();
+                    scriptFields = new ArrayList<>();
                     while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                         String scriptFieldName = parser.currentName();
                         token = parser.nextToken();
@@ -780,9 +791,8 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
                                     + currentFieldName + "] but found [" + token + "]", parser.getTokenLocation());
                         }
                     }
-                    builder.scriptFields = scriptFields;
                 } else if (context.parseFieldMatcher().match(currentFieldName, INDICES_BOOST_FIELD)) {
-                    ObjectFloatHashMap<String> indexBoost = new ObjectFloatHashMap<String>();
+                    indexBoost = new ObjectFloatHashMap<String>();
                     while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                         if (token == XContentParser.Token.FIELD_NAME) {
                             currentFieldName = parser.currentName();
@@ -793,9 +803,8 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
                                     parser.getTokenLocation());
                         }
                     }
-                    builder.indexBoost = indexBoost;
                 } else if (context.parseFieldMatcher().match(currentFieldName, AGGREGATIONS_FIELD)) {
-                    List<BytesReference> aggregations = new ArrayList<>();
+                    aggregations = new ArrayList<>();
                     while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                         currentFieldName = parser.currentName();
                         token = parser.nextToken();
@@ -811,24 +820,22 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
                                     parser.getTokenLocation());
                         }
                     }
-                    builder.aggregations = aggregations;
                 } else if (context.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_FIELD)) {
                     XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
-                    builder.highlightBuilder = xContentBuilder.bytes();
+                    highlightBuilder = xContentBuilder.bytes();
                 } else if (context.parseFieldMatcher().match(currentFieldName, INNER_HITS_FIELD)) {
                     XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
-                    builder.innerHitsBuilder = xContentBuilder.bytes();
+                    innerHitsBuilder = xContentBuilder.bytes();
                 } else if (context.parseFieldMatcher().match(currentFieldName, SUGGEST_FIELD)) {
                     XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
-                    builder.suggestBuilder = xContentBuilder.bytes();
+                    suggestBuilder = xContentBuilder.bytes();
                 } else if (context.parseFieldMatcher().match(currentFieldName, SORT_FIELD)) {
-                    List<BytesReference> sorts = new ArrayList<>();
+                    sorts = new ArrayList<>();
                     XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
                     sorts.add(xContentBuilder.bytes());
-                    builder.sorts = sorts;
                 } else if (context.parseFieldMatcher().match(currentFieldName, EXT_FIELD)) {
                     XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
-                    builder.ext = xContentBuilder.bytes();
+                    ext = xContentBuilder.bytes();
                 } else {
                     throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].",
                             parser.getTokenLocation());
@@ -836,7 +843,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
             } else if (token == XContentParser.Token.START_ARRAY) {
 
                 if (context.parseFieldMatcher().match(currentFieldName, FIELDS_FIELD)) {
-                    List<String> fieldNames = new ArrayList<>();
+                    fieldNames = new ArrayList<>();
                     while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                         if (token == XContentParser.Token.VALUE_STRING) {
                             fieldNames.add(parser.text());
@@ -845,9 +852,8 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
                                     + currentFieldName + "] but found [" + token + "]", parser.getTokenLocation());
                         }
                     }
-                    builder.fieldNames = fieldNames;
                 } else if (context.parseFieldMatcher().match(currentFieldName, FIELDDATA_FIELDS_FIELD)) {
-                    List<String> fieldDataFields = new ArrayList<>();
+                    fieldDataFields = new ArrayList<>();
                     while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                         if (token == XContentParser.Token.VALUE_STRING) {
                             fieldDataFields.add(parser.text());
@@ -856,23 +862,20 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
                                     + currentFieldName + "] but found [" + token + "]", parser.getTokenLocation());
                         }
                     }
-                    builder.fieldDataFields = fieldDataFields;
                 } else if (context.parseFieldMatcher().match(currentFieldName, SORT_FIELD)) {
-                    List<BytesReference> sorts = new ArrayList<>();
+                    sorts = new ArrayList<>();
                     while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                         XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
                         sorts.add(xContentBuilder.bytes());
                     }
-                    builder.sorts = sorts;
                 } else if (context.parseFieldMatcher().match(currentFieldName, RESCORE_FIELD)) {
-                    List<BytesReference> rescoreBuilders = new ArrayList<>();
+                    rescoreBuilders = new ArrayList<>();
                     while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                         XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
                         rescoreBuilders.add(xContentBuilder.bytes());
                     }
-                    builder.rescoreBuilders = rescoreBuilders;
                 } else if (context.parseFieldMatcher().match(currentFieldName, STATS_FIELD)) {
-                    List<String> stats = new ArrayList<>();
+                    stats = new ArrayList<>();
                     while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                         if (token == XContentParser.Token.VALUE_STRING) {
                             stats.add(parser.text());
@@ -881,9 +884,8 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
                                     + currentFieldName + "] but found [" + token + "]", parser.getTokenLocation());
                         }
                     }
-                    builder.stats = stats;
                 } else if (context.parseFieldMatcher().match(currentFieldName, _SOURCE_FIELD)) {
-                    builder.fetchSourceContext = FetchSourceContext.parse(parser, context);
+                    fetchSourceContext = FetchSourceContext.parse(parser, context);
                 } else {
                     throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].",
                             parser.getTokenLocation());
@@ -893,7 +895,6 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
                         parser.getTokenLocation());
             }
         }
-        return builder;
     }
 
     @Override
@@ -50,6 +50,7 @@ OFFICIAL PLUGINS
     - mapper-attachments
     - mapper-murmur3
    - mapper-size
+    - reindex
     - repository-azure
     - repository-s3
     - store-smb
@@ -62,5 +63,5 @@ OPTIONS
     -v,--verbose    Verbose output

     -h,--help       Shows this message

     -b,--batch      Enable batch mode explicitly, automatic confirmation of security permissions
@@ -22,8 +22,8 @@ package org.elasticsearch.action.admin.indices.create;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
-import org.elasticsearch.rest.NoOpClient;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.client.NoOpClient;
 import org.junit.After;
 import org.junit.Before;
@@ -0,0 +1,35 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.bulk;

import org.elasticsearch.test.ESTestCase;

import static org.apache.lucene.util.TestUtil.randomSimpleString;

public class BulkShardRequestTests extends ESTestCase {
    public void testToString() {
        String index = randomSimpleString(getRandom(), 10);
        int count = between(1, 100);
        BulkShardRequest r = new BulkShardRequest(null, index, 0, false, new BulkItemRequest[count]);
        assertEquals("BulkShardRequest to [" + index + "] containing [" + count + "] requests", r.toString());
        r = new BulkShardRequest(null, index, 0, true, new BulkItemRequest[count]);
        assertEquals("BulkShardRequest to [" + index + "] containing [" + count + "] requests and a refresh", r.toString());
    }
}
@@ -23,8 +23,8 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentType;
-import org.elasticsearch.rest.NoOpClient;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.client.NoOpClient;
 import org.junit.After;
 import org.junit.Before;

@@ -35,6 +35,7 @@ import org.elasticsearch.client.Requests;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.client.NoOpClient;
 import org.elasticsearch.test.rest.FakeRestRequest;

 import java.util.Arrays;
@@ -15,6 +15,11 @@ The delete by query plugin adds support for deleting all of the documents
 replacement for the problematic _delete-by-query_ functionality which has been
 removed from Elasticsearch core.
 
+<<plugins-reindex,Reindex>>::
+
+The Reindex plugin adds support for updating documents matching a query and
+copying documents from one index to another.
+
 [float]
 === Community contributed API extension plugins
docs/plugins/reindex.asciidoc (new file, 676 lines)
@@ -0,0 +1,676 @@

[[plugins-reindex]]
=== Reindex Plugin

The reindex plugin adds two APIs:

* `_update_by_query` updates all documents matching a query in place.
* `_reindex` copies documents from one index to another.

These APIs are siblings so they live in the same plugin. Both use
{ref}/search-request-scroll.html[Scroll] and {ref}/docs-bulk.html[Bulk] APIs
to send an index request per document. There are potential shortcuts that could
speed up this process, so this plugin may change how this is done in the future.

[float]
==== Installation

This plugin can be installed using the plugin manager:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin install reindex
----------------------------------------------------------------

The plugin must be installed on every node in the cluster, and each node must
be restarted after installation.

[float]
==== Removal

The plugin can be removed with the following command:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin remove reindex
----------------------------------------------------------------

The node must be stopped before removing the plugin.

[[update-by-query-usage]]
==== Using `_update_by_query`

The simplest usage of `_update_by_query` just performs an update on every
document in the index without changing the source. This is useful to
<<picking-up-a-new-property,pick up a new property>> or some other online
mapping change. Here is the API:

[source,js]
--------------------------------------------------
POST /twitter/_update_by_query?conflicts=proceed
--------------------------------------------------
// AUTOSENSE

That will return something like this:

[source,js]
--------------------------------------------------
{
  "took" : 639,
  "updated": 1235,
  "batches": 13,
  "version_conflicts": 2,
  "failures" : [ ]
}
--------------------------------------------------

`_update_by_query` gets a snapshot of the index when it starts and indexes what
it finds using `internal` versioning. That means that you'll get a version
conflict if the document changes between the time when the snapshot was taken
and when the index request is processed. When the versions match, the document
is updated and the version number is incremented.

All update and query failures cause the `_update_by_query` to abort and are
returned in the `failures` element of the response. The updates that have been
performed still stick. In other words, the process is not rolled back, only
aborted. While the first failure causes the abort, all failures returned by the
failing bulk request are included in the `failures` element, so it's possible
for there to be quite a few.

If you want to simply count version conflicts rather than cause the
`_update_by_query` to abort, you can set `conflicts=proceed` on the URL or
`"conflicts": "proceed"` in the request body. The first example does this
because it is just trying to pick up an online mapping change and a version
conflict simply means that the conflicting document was updated between the
start of the `_update_by_query` and the time when it attempted to update the
document. This is fine because that update will have picked up the online
mapping update.
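
In request-body form the same option looks like this (a minimal sketch):

[source,js]
--------------------------------------------------
POST /twitter/_update_by_query
{
  "conflicts": "proceed"
}
--------------------------------------------------
// AUTOSENSE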

Back to the API format, you can limit `_update_by_query` to a single type. This
will only update `tweet`s from the `twitter` index:

[source,js]
--------------------------------------------------
POST /twitter/tweet/_update_by_query?conflicts=proceed
--------------------------------------------------
// AUTOSENSE

You can also limit `_update_by_query` using the
{ref}/query-dsl.html[Query DSL]. This will update all documents from the
`twitter` index for the user `kimchy`:

[source,js]
--------------------------------------------------
POST /twitter/_update_by_query?conflicts=proceed
{
  "query": { <1>
    "term": {
      "user": "kimchy"
    }
  }
}
--------------------------------------------------
// AUTOSENSE

<1> The query must be passed as a value to the `query` key, in the same
way as the {ref}/search-search.html[Search API]. You can also use the `q`
parameter in the same way as the search API.
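
For example, the same update can be written with the `q` parameter:

[source,js]
--------------------------------------------------
POST /twitter/_update_by_query?conflicts=proceed&q=user:kimchy
--------------------------------------------------
// AUTOSENSE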

So far we've only been updating documents without changing their source. That
is genuinely useful for things like
<<picking-up-a-new-property,picking up new properties>> but it's only half the
fun. `_update_by_query` supports a `script` object to update the document. This
will increment the `likes` field on all of kimchy's tweets:

[source,js]
--------------------------------------------------
POST /twitter/_update_by_query
{
  "script": {
    "inline": "ctx._source.likes++"
  },
  "query": {
    "term": {
      "user": "kimchy"
    }
  }
}
--------------------------------------------------
// AUTOSENSE

Just as in the {ref}/docs-update.html[Update API] you can set `ctx.op = "noop"`
if your script decides that it doesn't have to make any changes. That will
cause `_update_by_query` to omit that document from its updates. Setting
`ctx.op` to anything else is an error. If you want to delete by a query you can
use the <<plugins-delete-by-query,Delete by Query Plugin>> instead. Setting any
other field in `ctx` is an error.
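
For example (an illustrative sketch building on the `likes` script above), a
script can skip documents that don't need the update:

[source,js]
--------------------------------------------------
POST /twitter/_update_by_query
{
  "script": {
    "inline": "if (ctx._source.likes > 100) { ctx.op = 'noop' } else { ctx._source.likes++ }"
  }
}
--------------------------------------------------
// AUTOSENSE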

Note that we stopped specifying `conflicts=proceed`. In this case we want a
version conflict to abort the process so we can handle the failure.

This API doesn't allow you to move the documents it touches, just modify their
source. This is intentional! We've made no provisions for removing the document
from its original location.

It's also possible to do this whole thing on multiple indexes and multiple
types at once, just like the search API:

[source,js]
--------------------------------------------------
POST /twitter,blog/tweet,post/_update_by_query
--------------------------------------------------
// AUTOSENSE

If you provide `routing` then the routing is copied to the scroll query,
limiting the process to the shards that match that routing value:

[source,js]
--------------------------------------------------
POST /twitter/_update_by_query?routing=1
--------------------------------------------------
// AUTOSENSE

By default `_update_by_query` uses scroll batches of 100. You can change the
batch size with the `scroll_size` URL parameter:

[source,js]
--------------------------------------------------
POST /twitter/_update_by_query?scroll_size=1000
--------------------------------------------------
// AUTOSENSE

[[reindex-usage]]
==== Using `_reindex`

`_reindex`'s most basic form just copies documents from one index to another.
This will copy documents from `twitter` into `new_twitter`:

[source,js]
--------------------------------------------------
POST /_reindex
{
  "source": {
    "index": "twitter"
  },
  "dest": {
    "index": "new_twitter"
  }
}
--------------------------------------------------
// AUTOSENSE

That will return something like this:

[source,js]
--------------------------------------------------
{
  "took" : 639,
  "updated": 112,
  "batches": 130,
  "version_conflicts": 0,
  "failures" : [ ],
  "created": 12344
}
--------------------------------------------------

Just like `_update_by_query`, `_reindex` gets a snapshot of the source index
but its target must be a **different** index so version conflicts are unlikely.
The `dest` element can be configured like the index API to control optimistic
concurrency control. Just leaving out `version_type` (as above) or setting it
to `internal` will cause Elasticsearch to blindly dump documents into the
target, overwriting any that happen to have the same type and id:

[source,js]
--------------------------------------------------
POST /_reindex
{
  "source": {
    "index": "twitter"
  },
  "dest": {
    "index": "new_twitter",
    "version_type": "internal"
  }
}
--------------------------------------------------
// AUTOSENSE

Setting `version_type` to `external` will cause Elasticsearch to preserve the
`version` from the source, create any documents that are missing, and update
any documents that have an older version in the destination index than they do
in the source index:

[source,js]
--------------------------------------------------
POST /_reindex
{
  "source": {
    "index": "twitter"
  },
  "dest": {
    "index": "new_twitter",
    "version_type": "external"
  }
}
--------------------------------------------------
// AUTOSENSE

Setting `op_type` to `create` will cause `_reindex` to only create missing
documents in the target index. All existing documents will cause a version
conflict:

[source,js]
--------------------------------------------------
POST /_reindex
{
  "source": {
    "index": "twitter"
  },
  "dest": {
    "index": "new_twitter",
    "op_type": "create"
  }
}
--------------------------------------------------
// AUTOSENSE

By default version conflicts abort the `_reindex` process but you can just
count them by setting `"conflicts": "proceed"` in the request body:

[source,js]
--------------------------------------------------
POST /_reindex
{
  "conflicts": "proceed",
  "source": {
    "index": "twitter"
  },
  "dest": {
    "index": "new_twitter",
    "op_type": "create"
  }
}
--------------------------------------------------
// AUTOSENSE

You can limit the documents by adding a type to the `source` or by adding a
query. This will only copy `tweet`s made by `kimchy` into `new_twitter`:

[source,js]
--------------------------------------------------
POST /_reindex
{
  "source": {
    "index": "twitter",
    "type": "tweet",
    "query": {
      "term": {
        "user": "kimchy"
      }
    }
  },
  "dest": {
    "index": "new_twitter"
  }
}
--------------------------------------------------
// AUTOSENSE

`index` and `type` in `source` can both be lists, allowing you to copy from
lots of sources in one request. This will copy documents from the `tweet` and
`post` types in the `twitter` and `blog` indexes. It'd include the `post` type
in the `twitter` index and the `tweet` type in the `blog` index. If you want to
be more specific you'll need to use the `query`. It also makes no effort to
handle id collisions. The target index will remain valid but it's not easy to
predict which document will survive because the iteration order isn't well
defined. Just avoid that situation, ok?

[source,js]
--------------------------------------------------
POST /_reindex
{
  "source": {
    "index": ["twitter", "blog"],
    "type": ["tweet", "post"]
  },
  "dest": {
    "index": "all_together"
  }
}
--------------------------------------------------
// AUTOSENSE

It's also possible to limit the number of processed documents by setting
`size`. This will only copy a single document from `twitter` to
`new_twitter`:

[source,js]
--------------------------------------------------
POST /_reindex
{
  "size": 1,
  "source": {
    "index": "twitter"
  },
  "dest": {
    "index": "new_twitter"
  }
}
--------------------------------------------------
// AUTOSENSE

If you want a particular set of documents from the `twitter` index you'll
need to sort. Sorting makes the scroll less efficient but in some contexts
it's worth it. If possible, prefer a more selective query to `size` and `sort`.
This will copy 10000 documents from `twitter` into `new_twitter`:

[source,js]
--------------------------------------------------
POST /_reindex
{
  "size": 10000,
  "source": {
    "index": "twitter",
    "sort": { "date": "desc" }
  },
  "dest": {
    "index": "new_twitter"
  }
}
--------------------------------------------------
// AUTOSENSE

Like `_update_by_query`, `_reindex` supports a script that modifies the
document. Unlike `_update_by_query`, the script is allowed to modify the
document's metadata. This example bumps the version of the source document:

[source,js]
--------------------------------------------------
POST /_reindex
{
  "source": {
    "index": "twitter"
  },
  "dest": {
    "index": "new_twitter",
    "version_type": "external"
  },
  "script": {
    "inline": "if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}"
  }
}
--------------------------------------------------
// AUTOSENSE

Think of the possibilities! Just be careful! With great power.... You can
change:

* `_id`
* `_type`
* `_index`
* `_version`
* `_routing`
* `_parent`
* `_timestamp`
* `_ttl`

Setting `_version` to `null` or clearing it from the `ctx` map is just like not
sending the version in an indexing request. It will cause that document to be
overwritten in the target index regardless of the version on the target or the
version type you use in the `_reindex` request.
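
For example (an illustrative sketch), this overwrites matching documents in
`new_twitter` even though `dest` asks for `external` versioning:

[source,js]
--------------------------------------------------
POST /_reindex
{
  "source": {
    "index": "twitter"
  },
  "dest": {
    "index": "new_twitter",
    "version_type": "external"
  },
  "script": {
    "inline": "ctx._version = null"
  }
}
--------------------------------------------------
// AUTOSENSE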

By default if `_reindex` sees a document with routing then the routing is
preserved unless it's changed by the script. You can set `routing` on the
`dest` request to change this:

`keep`::

Sets the routing on the bulk request sent for each match to the routing on
the match. The default.

`discard`::

Sets the routing on the bulk request sent for each match to null.

`=<some text>`::

Sets the routing on the bulk request sent for each match to all text after
the `=`.

For example, you can use the following request to copy all documents from
the `source` index with the company name `cat` into the `dest` index with
routing set to `cat`:

[source,js]
--------------------------------------------------
POST /_reindex
{
  "source": {
    "index": "source",
    "query": {
      "match": {
        "company": "cat"
      }
    }
  },
  "dest": {
    "index": "dest",
    "routing": "=cat"
  }
}
--------------------------------------------------
// AUTOSENSE

[float]
=== URL Parameters

In addition to the standard parameters like `pretty`, all APIs in this plugin
support `refresh`, `consistency`, and `timeout`.

Sending the `refresh` URL parameter will cause all indexes to which the request
wrote to be refreshed. This is different from the Index API's `refresh`
parameter, which causes just the shard that received the new data to be
refreshed.

`consistency` controls how many copies of a shard must respond to each write
request. `timeout` controls how long each write request waits for unavailable
shards to become available. Both work exactly how they work in the
{ref}/docs-bulk.html[Bulk API].
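
For example (an illustrative combination of these parameters), the following
waits for all shard copies, allows two minutes for unavailable shards, and
refreshes the affected indexes when it finishes:

[source,js]
--------------------------------------------------
POST /twitter/_update_by_query?refresh&consistency=all&timeout=2m
--------------------------------------------------
// AUTOSENSE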
|
||||
|
||||
`timeout` controls how long each batch waits for the target shard to become
|
||||
available. It works exactly how it works in the {ref}/docs-bulk.html[Bulk API].
|
||||
|
||||
[float]
|
||||
=== Response body
|
||||
|
||||
The JSON response looks like this:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"took" : 639,
|
||||
"updated": 0,
|
||||
"batches": 1,
|
||||
"version_conflicts": 2,
|
||||
"failures" : [ ]
|
||||
"created": 123,
|
||||
}
|
||||
--------------------------------------------------
|
||||
|
||||
`took`::
|
||||
|
||||
The number of milliseconds from start to end of the whole operation.
|
||||
|
||||
`updated`::
|
||||
|
||||
The number of documents that were successfully updated.
|
||||
|
||||
`batches`::
|
||||
|
||||
The number of scroll responses pulled back by the the `_reindex` or
|
||||
`_update_by_query`.
|
||||
|
||||
`version_conflicts`::
|
||||
|
||||
The number of version conflicts that the `_reindex_` or `_update_by_query` hit.
|
||||
|
||||
`failures`::
|
||||
|
||||
Array of all indexing failures. If this is non-empty then the request aborted
|
||||
because of those failures. See `conflicts` for how to prevent version conflicts
|
||||
from aborting the operation.
|
||||
|
||||
`created`::
|
||||
|
||||
The number of documents that were successfully created. This is not returned by
|
||||
`_update_by_query` because it isn't allowed to create documents.
|
||||
|
||||
[float]
|
||||
=== Examples
|
||||
|
||||
Below are some examples of how you might use this plugin:
|
||||
|
||||
[[picking-up-a-new-property]]
|
||||
==== Pick up a new property
|
||||
|
||||
Say you created an index without dynamic mapping, filled it with data, and then
|
||||
added a mapping value to pick up more fields from the data:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT test
|
||||
{
|
||||
"mappings": {
|
||||
"test": {
|
||||
"dynamic": false, <1>
|
||||
"properties": {
|
||||
"text": {"type": "string"}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
POST test/test?refresh
|
||||
{
|
||||
"text": "words words",
|
||||
"flag": "bar"
|
||||
}'
|
||||
POST test/test?refresh
|
||||
{
|
||||
"text": "words words",
|
||||
"flag": "foo"
|
||||
}'
|
||||
PUT test/_mapping/test <2>
|
||||
{
|
||||
"properties": {
|
||||
"text": {"type": "string"},
|
||||
"flag": {"type": "string", "analyzer": "keyword"}
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// AUTOSENSE
|
||||
|
||||
<1> This means that new fields won't be indexed, just stored in `_source`.

<2> This updates the mapping to add the new `flag` field. To pick up the new
field you have to reindex all documents that contain it.

Searching for the data won't find anything:

[source,js]
--------------------------------------------------
POST test/_search?filter_path=hits.total
{
  "query": {
    "match": {
      "flag": "foo"
    }
  }
}
--------------------------------------------------
// AUTOSENSE

[source,js]
--------------------------------------------------
{
  "hits" : {
    "total" : 0
  }
}
--------------------------------------------------

But you can issue an `_update_by_query` request to pick up the new mapping:

[source,js]
--------------------------------------------------
POST test/_update_by_query?refresh&conflicts=proceed
POST test/_search?filter_path=hits.total
{
  "query": {
    "match": {
      "flag": "foo"
    }
  }
}
--------------------------------------------------
// AUTOSENSE

[source,js]
--------------------------------------------------
{
  "hits" : {
    "total" : 1
  }
}
--------------------------------------------------

Hurray! You can do the exact same thing when adding a field to a multifield.
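
For instance, a sketch of the multifield case might look like this (the `raw`
sub-field name and its `not_analyzed` setting are illustrative choices, not
anything this plugin requires):

[source,js]
--------------------------------------------------
PUT test/_mapping/test
{
  "properties": {
    "text": {
      "type": "string",
      "fields": {
        "raw": {"type": "string", "index": "not_analyzed"}
      }
    }
  }
}

POST test/_update_by_query?refresh&conflicts=proceed
--------------------------------------------------
// AUTOSENSE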

==== Change the name of a field

`_reindex` can be used to build a copy of an index with renamed fields. Say you
create an index containing documents that look like this:

[source,js]
--------------------------------------------------
POST test/test/1?refresh&pretty
{
  "text": "words words",
  "flag": "foo"
}
--------------------------------------------------
// AUTOSENSE

But you don't like the name `flag` and want to replace it with `tag`.
`_reindex` can create the other index for you:

[source,js]
--------------------------------------------------
POST _reindex?pretty
{
  "source": {
    "index": "test"
  },
  "dest": {
    "index": "test2"
  },
  "script": {
    "inline": "ctx._source.tag = ctx._source.remove(\"flag\")"
  }
}
--------------------------------------------------
// AUTOSENSE

Now you can get the new document:

[source,js]
--------------------------------------------------
GET test2/test/1?pretty
--------------------------------------------------
// AUTOSENSE

and it'll look like:

[source,js]
--------------------------------------------------
{
  "text": "words words",
  "tag": "foo"
}
--------------------------------------------------

Or you can search by `tag` or whatever you want.
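
For example, a sketch of such a search against the `test2` index created above,
mirroring the `filter_path` searches earlier in this document:

[source,js]
--------------------------------------------------
POST test2/_search?filter_path=hits.total
{
  "query": {
    "match": {
      "tag": "foo"
    }
  }
}
--------------------------------------------------
// AUTOSENSE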

23 plugins/reindex/build.gradle Normal file
@ -0,0 +1,23 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

esplugin {
  description 'The Reindex Plugin adds APIs to reindex from one index to another or update documents in place.'
  classname 'org.elasticsearch.plugin.reindex.ReindexPlugin'
}

@ -0,0 +1,373 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.ClearScrollResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;

import static java.lang.Math.max;
import static java.util.Collections.unmodifiableList;
import static org.elasticsearch.plugin.reindex.AbstractBulkByScrollRequest.SIZE_ALL_MATCHES;
import static org.elasticsearch.rest.RestStatus.CONFLICT;
import static org.elasticsearch.search.sort.SortBuilders.fieldSort;

/**
 * Abstract base for scrolling across a search and executing bulk actions on all
 * results.
 */
public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBulkByScrollRequest<Request>, Response> {
    protected final Request mainRequest;

    private final AtomicLong startTime = new AtomicLong(-1);
    private final AtomicLong updated = new AtomicLong(0);
    private final AtomicLong created = new AtomicLong(0);
    private final AtomicLong deleted = new AtomicLong(0);
    private final AtomicInteger batches = new AtomicInteger(0);
    private final AtomicLong versionConflicts = new AtomicLong(0);
    private final AtomicReference<String> scroll = new AtomicReference<>();
    private final List<Failure> indexingFailures = new CopyOnWriteArrayList<>();
    private final List<ShardSearchFailure> searchFailures = new CopyOnWriteArrayList<>();
    private final Set<String> destinationIndices = Collections.newSetFromMap(new ConcurrentHashMap<>());

    private final ESLogger logger;
    private final Client client;
    private final ThreadPool threadPool;
    private final SearchRequest firstSearchRequest;
    private final ActionListener<Response> listener;

    public AbstractAsyncBulkByScrollAction(ESLogger logger, Client client, ThreadPool threadPool, Request mainRequest,
            SearchRequest firstSearchRequest, ActionListener<Response> listener) {
        this.logger = logger;
        this.client = client;
        this.threadPool = threadPool;
        this.mainRequest = mainRequest;
        this.firstSearchRequest = firstSearchRequest;
        this.listener = listener;
    }

    protected abstract BulkRequest buildBulk(Iterable<SearchHit> docs);

    protected abstract Response buildResponse(long took);

    public void start() {
        initialSearch();
    }

    /**
     * Count of documents updated.
     */
    public long updated() {
        return updated.get();
    }

    /**
     * Count of documents created.
     */
    public long created() {
        return created.get();
    }

    /**
     * Count of successful delete operations.
     */
    public long deleted() {
        return deleted.get();
    }

    /**
     * The number of scroll responses this request has processed.
     */
    public int batches() {
        return batches.get();
    }

    public long versionConflicts() {
        return versionConflicts.get();
    }

    public long successfullyProcessed() {
        return updated.get() + created.get() + deleted.get();
    }

    public List<Failure> indexingFailures() {
        return unmodifiableList(indexingFailures);
    }

    public List<ShardSearchFailure> searchFailures() {
        return unmodifiableList(searchFailures);
    }

    private void initialSearch() {
        try {
            // Default to sorting by _doc if it hasn't been changed.
            if (firstSearchRequest.source().sorts() == null) {
                firstSearchRequest.source().sort(fieldSort("_doc"));
            }
            startTime.set(System.nanoTime());
            if (logger.isDebugEnabled()) {
                logger.debug("executing initial scroll against {}{}",
                        firstSearchRequest.indices() == null || firstSearchRequest.indices().length == 0 ? "all indices"
                                : firstSearchRequest.indices(),
                        firstSearchRequest.types() == null || firstSearchRequest.types().length == 0 ? ""
                                : firstSearchRequest.types());
            }
            client.search(firstSearchRequest, new ActionListener<SearchResponse>() {
                @Override
                public void onResponse(SearchResponse response) {
                    logger.debug("[{}] documents match query", response.getHits().getTotalHits());
                    onScrollResponse(response);
                }

                @Override
                public void onFailure(Throwable e) {
                    logger.error("failed while executing the initial scroll request", e);
                    listener.onFailure(e);
                }
            });
        } catch (Throwable t) {
            finishHim(t);
        }
    }

    void onScrollResponse(SearchResponse searchResponse) {
        scroll.set(searchResponse.getScrollId());
        if (searchResponse.getShardFailures() != null && searchResponse.getShardFailures().length > 0) {
            Collections.addAll(searchFailures, searchResponse.getShardFailures());
            startNormalTermination();
            return;
        }
        threadPool.generic().execute(new AbstractRunnable() {
            @Override
            protected void doRun() throws Exception {
                try {
                    SearchHit[] docs = searchResponse.getHits().getHits();
                    logger.debug("scroll returned [{}] documents with a scroll id of [{}]", docs.length, searchResponse.getScrollId());
                    if (docs.length == 0) {
                        startNormalTermination();
                        return;
                    }
                    batches.incrementAndGet();
                    List<SearchHit> docsIterable = Arrays.asList(docs);
                    if (mainRequest.getSize() != SIZE_ALL_MATCHES) {
                        // Truncate the docs if we have more than the request size
                        long remaining = max(0, mainRequest.getSize() - successfullyProcessed());
                        if (remaining < docs.length) {
                            docsIterable = docsIterable.subList(0, (int) remaining);
                        }
                    }
                    BulkRequest request = buildBulk(docsIterable);
                    if (request.requests().isEmpty()) {
                        /*
                         * If we noop-ed the entire batch then just skip to the next
                         * batch or the BulkRequest would fail validation.
                         */
                        startNextScrollRequest();
                        return;
                    }
                    request.timeout(mainRequest.getTimeout());
                    request.consistencyLevel(mainRequest.getConsistency());
                    if (logger.isDebugEnabled()) {
                        logger.debug("sending [{}] entry, [{}] bulk request", request.requests().size(),
                                new ByteSizeValue(request.estimatedSizeInBytes()));
                    }
                    // NOCOMMIT handle rejections
                    client.bulk(request, new ActionListener<BulkResponse>() {
                        @Override
                        public void onResponse(BulkResponse response) {
                            onBulkResponse(response);
                        }

                        @Override
                        public void onFailure(Throwable e) {
                            finishHim(e);
                        }
                    });
                } catch (Throwable t) {
                    finishHim(t);
                }
            }

            @Override
            public void onFailure(Throwable t) {
                finishHim(t);
            }
        });
    }

    void onBulkResponse(BulkResponse response) {
        try {
            Set<String> destinationIndicesThisBatch = new HashSet<>();
            for (BulkItemResponse item : response) {
                if (item.isFailed()) {
                    recordFailure(item.getFailure());
                    continue;
                }

                switch (item.getOpType()) {
                case "index":
                case "create":
                    IndexResponse ir = item.getResponse();
                    if (ir.isCreated()) {
                        created.incrementAndGet();
                    } else {
                        updated.incrementAndGet();
                    }
                    break;
                case "delete":
                    deleted.incrementAndGet();
                    break;
                default:
                    throw new IllegalArgumentException("Unknown op type: " + item.getOpType());
                }
                // Track the indexes we've seen so we can refresh them if requested
                destinationIndicesThisBatch.add(item.getIndex());
            }
            destinationIndices.addAll(destinationIndicesThisBatch);

            if (false == indexingFailures.isEmpty()) {
                startNormalTermination();
                return;
            }

            if (mainRequest.getSize() != SIZE_ALL_MATCHES && successfullyProcessed() >= mainRequest.getSize()) {
                // We've processed all the requested docs.
                startNormalTermination();
                return;
            }
            startNextScrollRequest();
        } catch (Throwable t) {
            finishHim(t);
        }
    }

    void startNextScrollRequest() {
        SearchScrollRequest request = new SearchScrollRequest(mainRequest);
        request.scrollId(scroll.get()).scroll(firstSearchRequest.scroll());
        client.searchScroll(request, new ActionListener<SearchResponse>() {
            @Override
            public void onResponse(SearchResponse response) {
                onScrollResponse(response);
            }

            @Override
            public void onFailure(Throwable e) {
                finishHim(e);
            }
        });
    }

    private void recordFailure(Failure failure) {
        if (failure.getStatus() == CONFLICT) {
            versionConflicts.incrementAndGet();
            if (false == mainRequest.isAbortOnVersionConflict()) {
                // Skip recording the failure so conflicts=proceed keeps going.
                return;
            }
        }
        indexingFailures.add(failure);
    }

    void startNormalTermination() {
        if (false == mainRequest.isRefresh()) {
            finishHim(null);
            return;
        }
        RefreshRequest refresh = new RefreshRequest(mainRequest);
        refresh.indices(destinationIndices.toArray(new String[destinationIndices.size()]));
        client.admin().indices().refresh(refresh, new ActionListener<RefreshResponse>() {
            @Override
            public void onResponse(RefreshResponse response) {
                finishHim(null);
            }

            @Override
            public void onFailure(Throwable e) {
                finishHim(e);
            }
        });
    }

    /**
     * Finish the request.
     *
     * @param failure
     *            the failure that caused the request to fail prematurely if
     *            not null. If null this doesn't mean the request was entirely
     *            successful - it may have accumulated failures in the
     *            failures list.
     */
    void finishHim(Throwable failure) {
        String scrollId = scroll.get();
        if (Strings.hasLength(scrollId)) {
            /*
             * Fire off the clear scroll but don't wait for it to return before
             * we send the user their response.
             */
            ClearScrollRequest clearScrollRequest = new ClearScrollRequest(mainRequest);
            clearScrollRequest.addScrollId(scrollId);
            client.clearScroll(clearScrollRequest, new ActionListener<ClearScrollResponse>() {
                @Override
                public void onResponse(ClearScrollResponse response) {
                    logger.debug("Freed [{}] contexts", response.getNumFreed());
                }

                @Override
                public void onFailure(Throwable e) {
                    logger.warn("Failed to clear scroll [" + scrollId + ']', e);
                }
            });
        }
        if (failure == null) {
            listener.onResponse(buildResponse(TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime.get())));
        } else {
            listener.onFailure(failure);
        }
    }
}

@ -0,0 +1,239 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.index.mapper.internal.IdFieldMapper;
import org.elasticsearch.index.mapper.internal.IndexFieldMapper;
import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
import org.elasticsearch.index.mapper.internal.RoutingFieldMapper;
import org.elasticsearch.index.mapper.internal.SourceFieldMapper;
import org.elasticsearch.index.mapper.internal.TTLFieldMapper;
import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
import org.elasticsearch.index.mapper.internal.VersionFieldMapper;
import org.elasticsearch.script.CompiledScript;
import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHitField;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicLong;

/**
 * Abstract base for scrolling across a search and executing bulk indexes on all
 * results.
 */
public abstract class AbstractAsyncBulkIndexByScrollAction<Request extends AbstractBulkIndexByScrollRequest<Request>, Response extends BulkIndexByScrollResponse>
        extends AbstractAsyncBulkByScrollAction<Request, Response> {

    private final AtomicLong noops = new AtomicLong(0);
    private final ScriptService scriptService;
    private final CompiledScript script;

    public AbstractAsyncBulkIndexByScrollAction(ESLogger logger, ScriptService scriptService, Client client, ThreadPool threadPool,
            Request mainRequest, SearchRequest firstSearchRequest, ActionListener<Response> listener) {
        super(logger, client, threadPool, mainRequest, firstSearchRequest, listener);
        this.scriptService = scriptService;
        if (mainRequest.getScript() == null) {
            script = null;
        } else {
            script = scriptService.compile(mainRequest.getScript(), ScriptContext.Standard.UPDATE, mainRequest);
        }
    }

    /**
     * Build the IndexRequest for a single search hit. This shouldn't handle
     * metadata or the script. That will be handled by copyMetadata and
     * applyScript functions that can be overridden.
     */
    protected abstract IndexRequest buildIndexRequest(SearchHit doc);

    /**
     * The number of noops (skipped bulk items) as part of this request.
     */
    public long noops() {
        return noops.get();
    }

    @Override
    protected BulkRequest buildBulk(Iterable<SearchHit> docs) {
        BulkRequest bulkRequest = new BulkRequest(mainRequest);
        ExecutableScript executableScript = null;
        Map<String, Object> scriptCtx = null;

        for (SearchHit doc : docs) {
            IndexRequest index = buildIndexRequest(doc);
            copyMetadata(index, doc);
            if (script != null) {
                if (executableScript == null) {
                    executableScript = scriptService.executable(script, mainRequest.getScript().getParams());
                    scriptCtx = new HashMap<>();
                }
                if (false == applyScript(index, doc, executableScript, scriptCtx)) {
                    continue;
                }
            }
            bulkRequest.add(index);
        }

        return bulkRequest;
    }

    /**
     * Copies the metadata from a hit to the index request.
     */
    protected void copyMetadata(IndexRequest index, SearchHit doc) {
        index.parent(fieldValue(doc, ParentFieldMapper.NAME));
        copyRouting(index, doc);
        // Comes back as a Long but needs to be a string
        Long timestamp = fieldValue(doc, TimestampFieldMapper.NAME);
        if (timestamp != null) {
            index.timestamp(timestamp.toString());
        }
        index.ttl(fieldValue(doc, TTLFieldMapper.NAME));
    }

    /**
     * Part of copyMetadata but called out individually for easy overriding.
     */
    protected void copyRouting(IndexRequest index, SearchHit doc) {
        index.routing(fieldValue(doc, RoutingFieldMapper.NAME));
    }

    protected <T> T fieldValue(SearchHit doc, String fieldName) {
        SearchHitField field = doc.field(fieldName);
        return field == null ? null : field.value();
    }

    /**
     * Apply a script to the request.
     *
     * @return is this request still ok to apply (true) or is it a noop (false)
     */
    @SuppressWarnings("unchecked")
    protected boolean applyScript(IndexRequest index, SearchHit doc, ExecutableScript script, final Map<String, Object> ctx) {
        if (script == null) {
            return true;
        }
        ctx.put(IndexFieldMapper.NAME, doc.index());
        ctx.put(TypeFieldMapper.NAME, doc.type());
        ctx.put(IdFieldMapper.NAME, doc.id());
        Long oldVersion = doc.getVersion();
        ctx.put(VersionFieldMapper.NAME, oldVersion);
        String oldParent = fieldValue(doc, ParentFieldMapper.NAME);
        ctx.put(ParentFieldMapper.NAME, oldParent);
        String oldRouting = fieldValue(doc, RoutingFieldMapper.NAME);
        ctx.put(RoutingFieldMapper.NAME, oldRouting);
        Long oldTimestamp = fieldValue(doc, TimestampFieldMapper.NAME);
        ctx.put(TimestampFieldMapper.NAME, oldTimestamp);
        Long oldTTL = fieldValue(doc, TTLFieldMapper.NAME);
        ctx.put(TTLFieldMapper.NAME, oldTTL);
        ctx.put(SourceFieldMapper.NAME, index.sourceAsMap());
        ctx.put("op", "update");
        script.setNextVar("ctx", ctx);
        script.run();
        Map<String, Object> resultCtx = (Map<String, Object>) script.unwrap(ctx);
        String newOp = (String) resultCtx.remove("op");
        if (newOp == null) {
            throw new IllegalArgumentException("Script cleared op!");
        }
        if ("noop".equals(newOp)) {
            noops.incrementAndGet();
            return false;
        }
        if (false == "update".equals(newOp)) {
            throw new IllegalArgumentException("Invalid op [" + newOp + ']');
        }

        /*
         * It'd be lovely to only set the source if we know it's been modified
         * but it isn't worth keeping two copies of it around just to check!
         */
        index.source((Map<String, Object>) resultCtx.remove(SourceFieldMapper.NAME));

        Object newValue = ctx.remove(IndexFieldMapper.NAME);
        if (false == doc.index().equals(newValue)) {
            scriptChangedIndex(index, newValue);
        }
        newValue = ctx.remove(TypeFieldMapper.NAME);
        if (false == doc.type().equals(newValue)) {
            scriptChangedType(index, newValue);
        }
        newValue = ctx.remove(IdFieldMapper.NAME);
        if (false == doc.id().equals(newValue)) {
            scriptChangedId(index, newValue);
        }
        newValue = ctx.remove(VersionFieldMapper.NAME);
        if (false == Objects.equals(oldVersion, newValue)) {
            scriptChangedVersion(index, newValue);
        }
        newValue = ctx.remove(ParentFieldMapper.NAME);
        if (false == Objects.equals(oldParent, newValue)) {
            scriptChangedParent(index, newValue);
        }
        /*
         * It's important that routing comes after parent in case you want to
         * change them both.
         */
        newValue = ctx.remove(RoutingFieldMapper.NAME);
        if (false == Objects.equals(oldRouting, newValue)) {
            scriptChangedRouting(index, newValue);
        }
        newValue = ctx.remove(TimestampFieldMapper.NAME);
        if (false == Objects.equals(oldTimestamp, newValue)) {
            scriptChangedTimestamp(index, newValue);
        }
        newValue = ctx.remove(TTLFieldMapper.NAME);
        if (false == Objects.equals(oldTTL, newValue)) {
            scriptChangedTTL(index, newValue);
        }
        if (false == ctx.isEmpty()) {
            throw new IllegalArgumentException("Invalid fields added to ctx [" + String.join(",", ctx.keySet()) + ']');
        }
        return true;
    }

    protected abstract void scriptChangedIndex(IndexRequest index, Object to);

    protected abstract void scriptChangedType(IndexRequest index, Object to);

    protected abstract void scriptChangedId(IndexRequest index, Object to);

    protected abstract void scriptChangedVersion(IndexRequest index, Object to);

    protected abstract void scriptChangedRouting(IndexRequest index, Object to);

    protected abstract void scriptChangedParent(IndexRequest index, Object to);

    protected abstract void scriptChangedTimestamp(IndexRequest index, Object to);

    protected abstract void scriptChangedTTL(IndexRequest index, Object to);
}

@ -0,0 +1,246 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.WriteConsistencyLevel;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.search.builder.SearchSourceBuilder;

import java.io.IOException;
import java.util.Arrays;

import static org.elasticsearch.action.ValidateActions.addValidationError;

public abstract class AbstractBulkByScrollRequest<Self extends AbstractBulkByScrollRequest<Self>>
        extends ActionRequest<Self> {
    public static final int SIZE_ALL_MATCHES = -1;
    private static final TimeValue DEFAULT_SCROLL_TIMEOUT = TimeValue.timeValueMinutes(5);
    private static final int DEFAULT_SCROLL_SIZE = 100;

    /**
     * The search to be executed.
     */
    private SearchRequest source;

    /**
     * Maximum number of processed documents. Defaults to -1 meaning process all
     * documents.
     */
    private int size = SIZE_ALL_MATCHES;

    /**
     * Should version conflicts cause aborts? Defaults to true.
     */
    private boolean abortOnVersionConflict = true;

    /**
     * Call refresh on the indexes we've written to after the request ends?
     */
    private boolean refresh = false;

    /**
     * Timeout to wait for the shards to become available for each bulk request.
     */
    private TimeValue timeout = ReplicationRequest.DEFAULT_TIMEOUT;

    /**
     * Consistency level for write requests.
     */
    private WriteConsistencyLevel consistency = WriteConsistencyLevel.DEFAULT;

    public AbstractBulkByScrollRequest() {
    }

    public AbstractBulkByScrollRequest(SearchRequest source) {
        this.source = source;

        // Set the defaults which differ from SearchRequest's defaults.
        source.scroll(DEFAULT_SCROLL_TIMEOUT);
        source.source(new SearchSourceBuilder());
        source.source().version(true);
        source.source().size(DEFAULT_SCROLL_SIZE);
    }

    /**
     * `this` cast to Self. Used for building fluent methods without cast
     * warnings.
     */
    protected abstract Self self();

    @Override
    public ActionRequestValidationException validate() {
        ActionRequestValidationException e = source.validate();
        if (source.source().from() != -1) {
            e = addValidationError("from is not supported in this context", e);
        }
        if (false == (size == -1 || size > 0)) {
            e = addValidationError(
                    "size should be greater than 0 if the request is limited to some number of documents or -1 if it isn't but it was ["
                            + size + "]",
                    e);
        }
        return e;
    }

    /**
     * Maximum number of processed documents. Defaults to -1 meaning process all
     * documents.
     */
    public int getSize() {
        return size;
    }

    /**
     * Maximum number of processed documents. Defaults to -1 meaning process all
     * documents.
     */
    public Self setSize(int size) {
        this.size = size;
        return self();
    }

    /**
     * Should version conflicts cause aborts? Defaults to true.
     */
    public boolean isAbortOnVersionConflict() {
        return abortOnVersionConflict;
    }

    /**
     * Should version conflicts cause aborts? Defaults to true.
     */
    public Self setAbortOnVersionConflict(boolean abortOnVersionConflict) {
        this.abortOnVersionConflict = abortOnVersionConflict;
        return self();
    }

    /**
     * Sets abortOnVersionConflict based on REST-friendly names.
     */
    public void setConflicts(String conflicts) {
        switch (conflicts) {
        case "proceed":
            setAbortOnVersionConflict(false);
            return;
        case "abort":
            setAbortOnVersionConflict(true);
            return;
        default:
            throw new IllegalArgumentException("conflicts may only be \"proceed\" or \"abort\" but was [" + conflicts + "]");
        }
    }

    /**
     * The search request that matches the documents to process.
     */
    public SearchRequest getSource() {
        return source;
    }

    /**
     * Call refresh on the indexes we've written to after the request ends?
     */
    public boolean isRefresh() {
        return refresh;
    }

    /**
     * Call refresh on the indexes we've written to after the request ends?
     */
    public Self setRefresh(boolean refresh) {
        this.refresh = refresh;
        return self();
    }

    /**
     * Timeout to wait for the shards to become available for each bulk request.
     */
    public TimeValue getTimeout() {
        return timeout;
    }

    /**
     * Timeout to wait for the shards to become available for each bulk request.
     */
    public Self setTimeout(TimeValue timeout) {
        this.timeout = timeout;
        return self();
    }

    /**
     * Consistency level for write requests.
     */
    public WriteConsistencyLevel getConsistency() {
        return consistency;
    }

    /**
     * Consistency level for write requests.
     */
    public Self setConsistency(WriteConsistencyLevel consistency) {
        this.consistency = consistency;
        return self();
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        source = new SearchRequest();
        source.readFrom(in);
        abortOnVersionConflict = in.readBoolean();
        size = in.readVInt();
        refresh = in.readBoolean();
        timeout = TimeValue.readTimeValue(in);
        consistency = WriteConsistencyLevel.fromId(in.readByte());
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        source.writeTo(out);
        out.writeBoolean(abortOnVersionConflict);
        out.writeVInt(size);
        out.writeBoolean(refresh);
        timeout.writeTo(out);
        out.writeByte(consistency.id());
    }

    /**
     * Append a short description of the search request to a StringBuilder. Used
     * to make toString.
     */
    protected void searchToString(StringBuilder b) {
        if (source.indices() != null && source.indices().length != 0) {
            b.append(Arrays.toString(source.indices()));
        } else {
            b.append("[all indices]");
        }
        if (source.types() != null && source.types().length != 0) {
            b.append(Arrays.toString(source.types()));
        }
    }
}

@ -0,0 +1,106 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.WriteConsistencyLevel;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.query.QueryBuilder;

public abstract class AbstractBulkByScrollRequestBuilder<Request extends AbstractBulkByScrollRequest<Request>, Response extends ActionResponse, Self extends AbstractBulkByScrollRequestBuilder<Request, Response, Self>>
        extends ActionRequestBuilder<Request, Response, Self> {
    private final SearchRequestBuilder source;

    protected AbstractBulkByScrollRequestBuilder(ElasticsearchClient client,
            Action<Request, Response, Self> action, SearchRequestBuilder source, Request request) {
        super(client, action, request);
        this.source = source;
    }

    protected abstract Self self();

    /**
     * The search used to find documents to process.
     */
    public SearchRequestBuilder source() {
        return source;
    }

    /**
     * Set the source indices.
     */
    public Self source(String... indices) {
        source.setIndices(indices);
        return self();
    }

    /**
     * Set the query that will filter the source. Just a convenience method for
     * easy chaining.
     */
    public Self filter(QueryBuilder<?> filter) {
        source.setQuery(filter);
        return self();
    }

    /**
     * The maximum number of documents to attempt.
     */
    public Self size(int size) {
        request.setSize(size);
        return self();
    }

    /**
     * Should version conflicts cause the action to abort?
     */
    public Self abortOnVersionConflict(boolean abortOnVersionConflict) {
        request.setAbortOnVersionConflict(abortOnVersionConflict);
        return self();
    }

    /**
     * Call refresh on the indexes we've written to after the request ends?
     */
    public Self refresh(boolean refresh) {
        request.setRefresh(refresh);
        return self();
    }

    /**
     * Timeout to wait for the shards to become available for each bulk request.
     */
    public Self timeout(TimeValue timeout) {
        request.setTimeout(timeout);
        return self();
    }

    /**
     * Consistency level for write requests.
     */
    public Self consistency(WriteConsistencyLevel consistency) {
        request.setConsistency(consistency);
        return self();
    }
}
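
A minimal sketch of how these fluent setters compose on a concrete subclass
builder. The `BulkByScrollBuilderExample` class, its `configure` helper, and
all of the parameter values are illustrative assumptions, not part of the
plugin; the sketch only uses the setters declared on the abstract base above:

[source,java]
--------------------------------------------------
package org.elasticsearch.plugin.reindex;

import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.WriteConsistencyLevel;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.query.QueryBuilders;

public class BulkByScrollBuilderExample {
    // Works for any concrete builder (reindex or update-by-query alike)
    // because it only touches the base class's fluent setters.
    static <Q extends AbstractBulkByScrollRequest<Q>,
            R extends ActionResponse,
            S extends AbstractBulkByScrollRequestBuilder<Q, R, S>> S configure(S builder) {
        return builder
                .filter(QueryBuilders.termQuery("flag", "foo")) // restrict the source docs
                .size(1000)                                     // process at most 1000 matches
                .abortOnVersionConflict(false)                  // same effect as conflicts=proceed
                .refresh(true)                                  // refresh written indexes when done
                .timeout(TimeValue.timeValueMinutes(2))
                .consistency(WriteConsistencyLevel.QUORUM);
    }
}
--------------------------------------------------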

@ -0,0 +1,80 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.script.Script;

import java.io.IOException;

public abstract class AbstractBulkIndexByScrollRequest<Self extends AbstractBulkIndexByScrollRequest<Self>>
        extends AbstractBulkByScrollRequest<Self> {
    /**
     * Script to modify the documents before they are processed.
     */
    private Script script;

    public AbstractBulkIndexByScrollRequest() {
    }

    public AbstractBulkIndexByScrollRequest(SearchRequest source) {
        super(source);
    }

    /**
     * Script to modify the documents before they are processed.
     */
    public Script getScript() {
        return script;
    }

    /**
     * Script to modify the documents before they are processed.
     */
    public Self setScript(@Nullable Script script) {
        this.script = script;
        return self();
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        if (in.readBoolean()) {
            script = Script.readScript(in);
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeOptionalStreamable(script);
    }

    @Override
    protected void searchToString(StringBuilder b) {
        super.searchToString(b);
        if (script != null) {
            b.append(" updated with [").append(script).append(']');
        }
    }
}

@ -0,0 +1,43 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.script.Script;

public abstract class AbstractBulkIndexByScrollRequestBuilder<Request extends AbstractBulkIndexByScrollRequest<Request>, Response extends ActionResponse, Self extends AbstractBulkIndexByScrollRequestBuilder<Request, Response, Self>>
        extends AbstractBulkByScrollRequestBuilder<Request, Response, Self> {

    protected AbstractBulkIndexByScrollRequestBuilder(ElasticsearchClient client,
            Action<Request, Response, Self> action, SearchRequestBuilder search, Request request) {
        super(client, action, search, request);
    }

    /**
     * Script to modify the documents before they are processed.
     */
    public Self script(Script script) {
        request.setScript(script);
        return self();
    }
}

@ -0,0 +1,184 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import static java.lang.Math.min;
import static java.util.Collections.unmodifiableList;
import static org.elasticsearch.action.search.ShardSearchFailure.readShardSearchFailure;

/**
 * Response used for actions that index many documents using a scroll request.
 */
public class BulkIndexByScrollResponse extends ActionResponse implements ToXContent {
    static final String TOOK_FIELD = "took";
    static final String UPDATED_FIELD = "updated";
    static final String BATCHES_FIELD = "batches";
    static final String VERSION_CONFLICTS_FIELD = "version_conflicts";
    static final String NOOPS_FIELD = "noops";
    static final String FAILURES_FIELD = "failures";

    private long took;
    private long updated;
    private int batches;
    private long versionConflicts;
    private long noops;
    private List<Failure> indexingFailures;
    private List<ShardSearchFailure> searchFailures;

    public BulkIndexByScrollResponse() {
    }

    public BulkIndexByScrollResponse(long took, long updated, int batches, long versionConflicts, long noops,
            List<Failure> indexingFailures, List<ShardSearchFailure> searchFailures) {
        this.took = took;
        this.updated = updated;
        this.batches = batches;
        this.versionConflicts = versionConflicts;
        this.noops = noops;
        this.indexingFailures = indexingFailures;
        this.searchFailures = searchFailures;
    }

    public long getTook() {
        return took;
    }

    public long getUpdated() {
        return updated;
    }

    public int getBatches() {
        return batches;
    }

    public long getVersionConflicts() {
        return versionConflicts;
    }

    public long getNoops() {
        return noops;
    }

    /**
     * Indexing failures.
     */
    public List<Failure> getIndexingFailures() {
        return indexingFailures;
    }

    public List<ShardSearchFailure> getSearchFailures() {
        return searchFailures;
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeVLong(took);
        out.writeVLong(updated);
        out.writeVInt(batches);
        out.writeVLong(versionConflicts);
        out.writeVLong(noops);
        out.writeVInt(indexingFailures.size());
        for (Failure failure: indexingFailures) {
            failure.writeTo(out);
        }
        out.writeVInt(searchFailures.size());
        for (ShardSearchFailure failure: searchFailures) {
            failure.writeTo(out);
        }
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        took = in.readVLong();
        updated = in.readVLong();
        batches = in.readVInt();
        versionConflicts = in.readVLong();
        noops = in.readVLong();
        int indexingFailuresCount = in.readVInt();
        List<Failure> indexingFailures = new ArrayList<>(indexingFailuresCount);
        for (int i = 0; i < indexingFailuresCount; i++) {
            indexingFailures.add(Failure.PROTOTYPE.readFrom(in));
        }
        this.indexingFailures = unmodifiableList(indexingFailures);
        int searchFailuresCount = in.readVInt();
        List<ShardSearchFailure> searchFailures = new ArrayList<>(searchFailuresCount);
        for (int i = 0; i < searchFailuresCount; i++) {
            searchFailures.add(readShardSearchFailure(in));
        }
        // Mirror the indexing failures above; without this assignment the
        // deserialized response would silently drop its search failures.
        this.searchFailures = unmodifiableList(searchFailures);
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.field(TOOK_FIELD, took);
        builder.field(UPDATED_FIELD, updated);
        builder.field(BATCHES_FIELD, batches);
        builder.field(VERSION_CONFLICTS_FIELD, versionConflicts);
        builder.field(NOOPS_FIELD, noops);
        builder.startArray(FAILURES_FIELD);
        for (Failure failure: indexingFailures) {
            builder.startObject();
            failure.toXContent(builder, params);
            builder.endObject();
        }
        for (ShardSearchFailure failure: searchFailures) {
            builder.startObject();
            failure.toXContent(builder, params);
            builder.endObject();
        }
        builder.endArray();
        return builder;
    }

    @Override
    public String toString() {
        StringBuilder builder = new StringBuilder();
        builder.append(toStringName()).append("[");
        builder.append("took=").append(took);
        builder.append(",updated=").append(updated);
        builder.append(",batches=").append(batches);
        builder.append(",versionConflicts=").append(versionConflicts);
        builder.append(",noops=").append(noops);
        builder.append(",indexing_failures=").append(getIndexingFailures().subList(0, min(3, getIndexingFailures().size())));
        builder.append(",search_failures=").append(getSearchFailures().subList(0, min(3, getSearchFailures().size())));
        innerToString(builder);
        return builder.append("]").toString();
    }

    protected String toStringName() {
        return "BulkIndexByScrollResponse";
    }

    protected void innerToString(StringBuilder builder) {
    }
}

@ -0,0 +1,46 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.rest.action.support.RestToXContentListener;

/**
 * Just like RestToXContentListener but will return higher than 200 status if
 * there are any failures.
 */
public class BulkIndexByScrollResponseContentListener<R extends BulkIndexByScrollResponse> extends RestToXContentListener<R> {
    public BulkIndexByScrollResponseContentListener(RestChannel channel) {
        super(channel);
    }

    @Override
    protected RestStatus getStatus(R response) {
        // Return the most severe (numerically highest) status of any failure.
        RestStatus status = RestStatus.OK;
        for (Failure failure : response.getIndexingFailures()) {
            if (failure.getStatus().getStatus() > status.getStatus()) {
                status = failure.getStatus();
            }
        }
        return status;
    }
}

@ -0,0 +1,42 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import org.elasticsearch.action.Action;
import org.elasticsearch.client.ElasticsearchClient;

public class ReindexAction extends Action<ReindexRequest, ReindexResponse, ReindexRequestBuilder> {
    public static final ReindexAction INSTANCE = new ReindexAction();
    public static final String NAME = "indices:data/write/reindex";

    private ReindexAction() {
        super(NAME);
    }

    @Override
    public ReindexRequestBuilder newRequestBuilder(ElasticsearchClient client) {
        return new ReindexRequestBuilder(client, this);
    }

    @Override
    public ReindexResponse newResponse() {
        return new ReindexResponse();
    }
}

@ -0,0 +1,48 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import org.elasticsearch.action.ActionModule;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.rest.RestModule;

public class ReindexPlugin extends Plugin {
    public static final String NAME = "reindex";

    @Override
    public String name() {
        return NAME;
    }

    @Override
    public String description() {
        return "The Reindex Plugin adds APIs to reindex from one index to another or update documents in place.";
    }

    public void onModule(ActionModule actionModule) {
        actionModule.registerAction(ReindexAction.INSTANCE, TransportReindexAction.class);
        actionModule.registerAction(UpdateByQueryAction.INSTANCE, TransportUpdateByQueryAction.class);
    }

    public void onModule(RestModule restModule) {
        restModule.addRestAction(RestReindexAction.class);
        restModule.addRestAction(RestUpdateByQueryAction.class);
    }
}

@ -0,0 +1,126 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import java.io.IOException;

import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lucene.uid.Versions;

import static org.elasticsearch.action.ValidateActions.addValidationError;
import static org.elasticsearch.index.VersionType.INTERNAL;

public class ReindexRequest extends AbstractBulkIndexByScrollRequest<ReindexRequest> {
    /**
     * Prototype for index requests.
     */
    private IndexRequest destination;

    public ReindexRequest() {
    }

    public ReindexRequest(SearchRequest search, IndexRequest destination) {
        super(search);
        this.destination = destination;
    }

    @Override
    protected ReindexRequest self() {
        return this;
    }

    @Override
    public ActionRequestValidationException validate() {
        ActionRequestValidationException e = super.validate();
        if (getSource().indices() == null || getSource().indices().length == 0) {
            e = addValidationError("use _all if you really want to copy from all existing indexes", e);
        }
        /*
         * Note that we don't call the index request's validator - it won't
         * work because we'll be filling in portions of it as we receive the
         * docs. But we can validate some things, so we do that below.
         */
        if (destination.index() == null) {
            e = addValidationError("index must be specified", e);
            return e;
        }
        if (false == routingIsValid()) {
            e = addValidationError("routing must be unset, [keep], [discard] or [=<some new value>]", e);
        }
        if (destination.versionType() == INTERNAL) {
            if (destination.version() != Versions.MATCH_ANY && destination.version() != Versions.MATCH_DELETED) {
                e = addValidationError("unsupported version for internal versioning [" + destination.version() + ']', e);
            }
        }
        if (destination.ttl() != -1) {
            e = addValidationError("setting ttl on destination isn't supported. use scripts instead.", e);
        }
        if (destination.timestamp() != null) {
            e = addValidationError("setting timestamp on destination isn't supported. use scripts instead.", e);
        }
        return e;
    }

    private boolean routingIsValid() {
        if (destination.routing() == null || destination.routing().startsWith("=")) {
            return true;
        }
        switch (destination.routing()) {
        case "keep":
        case "discard":
            return true;
        default:
            return false;
        }
    }

    public IndexRequest getDestination() {
        return destination;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        destination = new IndexRequest();
        destination.readFrom(in);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        destination.writeTo(out);
    }

    @Override
    public String toString() {
        StringBuilder b = new StringBuilder();
        b.append("reindex from ");
        searchToString(b);
        b.append(" to [").append(destination.index()).append(']');
        if (destination.type() != null) {
            b.append('[').append(destination.type()).append(']');
        }
        return b.toString();
    }
}
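The routing rules validated above form a tiny command language on the destination request: unset and "keep" copy each source document's routing, "discard" drops it, and "=<value>" substitutes a literal. A minimal sketch of the three accepted spellings (the index name is hypothetical):

    // Hedged sketch: the destination routing values ReindexRequest accepts.
    IndexRequest dest = new IndexRequest("dest_index");
    dest.routing("keep");     // copy routing from each source doc (same as leaving it unset)
    dest.routing("discard");  // index the copies with no routing
    dest.routing("=shard_7"); // route every copy to the literal value "shard_7"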
@ -0,0 +1,70 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import org.elasticsearch.action.Action;
import org.elasticsearch.action.index.IndexAction;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchAction;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;

public class ReindexRequestBuilder extends
        AbstractBulkIndexByScrollRequestBuilder<ReindexRequest, ReindexResponse, ReindexRequestBuilder> {
    private final IndexRequestBuilder destination;

    public ReindexRequestBuilder(ElasticsearchClient client,
            Action<ReindexRequest, ReindexResponse, ReindexRequestBuilder> action) {
        this(client, action, new SearchRequestBuilder(client, SearchAction.INSTANCE),
                new IndexRequestBuilder(client, IndexAction.INSTANCE));
    }

    private ReindexRequestBuilder(ElasticsearchClient client,
            Action<ReindexRequest, ReindexResponse, ReindexRequestBuilder> action,
            SearchRequestBuilder search, IndexRequestBuilder destination) {
        super(client, action, search, new ReindexRequest(search.request(), destination.request()));
        this.destination = destination;
    }

    @Override
    protected ReindexRequestBuilder self() {
        return this;
    }

    public IndexRequestBuilder destination() {
        return destination;
    }

    /**
     * Set the destination index.
     */
    public ReindexRequestBuilder destination(String index) {
        destination.setIndex(index);
        return this;
    }

    /**
     * Set the destination index and type.
     */
    public ReindexRequestBuilder destination(String index, String type) {
        destination.setIndex(index).setType(type);
        return this;
    }
}
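A minimal sketch of driving a reindex from Java through this builder. It assumes the inherited base builder exposes its SearchRequestBuilder via a source() accessor, which is not shown in this commit; the index names are hypothetical:

    ReindexRequestBuilder reindex = ReindexAction.INSTANCE.newRequestBuilder(client);
    reindex.source().setIndices("old_index"); // assumption: source() exposes the search builder
    reindex.destination("new_index", "doc");  // copy everything into new_index under type doc
    ReindexResponse response = reindex.get(); // execute and block for the response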
@ -0,0 +1,95 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
import java.util.Iterator;
import java.util.List;

public class ReindexResponse extends BulkIndexByScrollResponse {
    static final String CREATED_FIELD = "created";

    private long created;

    public ReindexResponse() {
    }

    public ReindexResponse(long took, long created, long updated, int batches, long versionConflicts, long noops,
            List<Failure> indexingFailures, List<ShardSearchFailure> searchFailures) {
        super(took, updated, batches, versionConflicts, noops, indexingFailures, searchFailures);
        this.created = created;
    }

    public long getCreated() {
        return created;
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeVLong(created);
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        created = in.readVLong();
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        super.toXContent(builder, params);
        builder.field(CREATED_FIELD, created);
        return builder;
    }

    @Override
    protected String toStringName() {
        return "ReindexResponse";
    }

    @Override
    protected void innerToString(StringBuilder builder) {
        builder.append(",created=").append(created);
    }

    /**
     * Append the first few failures to build a useful message for toString.
     */
    protected void truncatedFailures(StringBuilder builder) {
        builder.append(",failures=[");
        Iterator<Failure> failures = getIndexingFailures().iterator();
        int written = 0;
        while (failures.hasNext() && written < 3) {
            // Write the separator before every failure after the first so we
            // don't emit a trailing ", ".
            if (written != 0) {
                builder.append(", ");
            }
            builder.append(failures.next().getMessage());
            written++;
        }
        builder.append(']');
    }
}
@ -0,0 +1,168 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import org.elasticsearch.action.WriteConsistencyLevel;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.script.Script;

import java.io.IOException;
import java.util.List;
import java.util.Map;

import static org.elasticsearch.common.unit.TimeValue.parseTimeValue;
import static org.elasticsearch.plugin.reindex.ReindexAction.INSTANCE;
import static org.elasticsearch.rest.RestRequest.Method.POST;
import static org.elasticsearch.rest.RestStatus.BAD_REQUEST;

/**
 * Expose ReindexRequest over rest.
 */
public class RestReindexAction extends BaseRestHandler {
    private static final ObjectParser<ReindexRequest, QueryParseContext> PARSER = new ObjectParser<>("reindex");
    static {
        ObjectParser.Parser<SearchRequest, QueryParseContext> sourceParser = (parser, search, context) -> {
            /*
             * Extract the parameters that we need from the parser. We could
             * do away with this hack when search source has an ObjectParser.
             */
            Map<String, Object> source = parser.map();
            String[] indices = extractStringArray(source, "index");
            if (indices != null) {
                search.indices(indices);
            }
            String[] types = extractStringArray(source, "type");
            if (types != null) {
                search.types(types);
            }
            XContentBuilder builder = XContentFactory.contentBuilder(parser.contentType());
            builder.map(source);
            parser = parser.contentType().xContent().createParser(builder.bytes());
            context.reset(parser);
            search.source().parseXContent(parser, context);
        };

        ObjectParser<IndexRequest, Void> destParser = new ObjectParser<>("dest");
        destParser.declareString(IndexRequest::index, new ParseField("index"));
        destParser.declareString(IndexRequest::type, new ParseField("type"));
        destParser.declareString(IndexRequest::routing, new ParseField("routing"));
        destParser.declareString(IndexRequest::opType, new ParseField("opType"));
        destParser.declareString((s, i) -> s.versionType(VersionType.fromString(i)), new ParseField("versionType"));

        // These exist just so the user can get a nice validation error:
        destParser.declareString(IndexRequest::timestamp, new ParseField("timestamp"));
        destParser.declareString((i, ttl) -> i.ttl(parseTimeValue(ttl, TimeValue.timeValueMillis(-1), "ttl").millis()), new ParseField("ttl"));

        PARSER.declareField((p, v, c) -> sourceParser.parse(p, v.getSource(), c), new ParseField("source"), ValueType.OBJECT);
        PARSER.declareField((p, v, c) -> destParser.parse(p, v.getDestination(), null), new ParseField("dest"), ValueType.OBJECT);
        PARSER.declareInt(ReindexRequest::setSize, new ParseField("size"));
        PARSER.declareField((p, v, c) -> v.setScript(Script.parse(p, c.parseFieldMatcher())), new ParseField("script"), ValueType.OBJECT);
        PARSER.declareString(ReindexRequest::setConflicts, new ParseField("conflicts"));
    }

    private IndicesQueriesRegistry indicesQueriesRegistry;

    @Inject
    public RestReindexAction(Settings settings, RestController controller, Client client,
            IndicesQueriesRegistry indicesQueriesRegistry) {
        super(settings, controller, client);
        this.indicesQueriesRegistry = indicesQueriesRegistry;
        controller.registerHandler(POST, "/_reindex", this);
    }

    @Override
    public void handleRequest(RestRequest request, RestChannel channel, Client client) throws IOException {
        if (false == request.hasContent()) {
            badRequest(channel, "body required");
            return;
        }

        ReindexRequest internalRequest = new ReindexRequest(new SearchRequest(), new IndexRequest());

        try (XContentParser xcontent = XContentFactory.xContent(request.content()).createParser(request.content())) {
            PARSER.parse(xcontent, internalRequest, new QueryParseContext(indicesQueriesRegistry));
        } catch (ParsingException e) {
            logger.warn("Bad request", e);
            badRequest(channel, e.getDetailedMessage());
            return;
        }
        parseCommon(internalRequest, request);

        client.execute(INSTANCE, internalRequest, new BulkIndexByScrollResponseContentListener<>(channel));
    }

    private void badRequest(RestChannel channel, String message) {
        try {
            XContentBuilder builder = channel.newErrorBuilder();
            channel.sendResponse(new BytesRestResponse(BAD_REQUEST, builder.startObject().field("error", message).endObject()));
        } catch (IOException e) {
            logger.warn("Failed to send response", e);
        }
    }

    public static void parseCommon(AbstractBulkByScrollRequest<?> internalRequest, RestRequest request) {
        internalRequest.setRefresh(request.paramAsBoolean("refresh", internalRequest.isRefresh()));
        internalRequest.setTimeout(request.paramAsTime("timeout", internalRequest.getTimeout()));
        String consistency = request.param("consistency");
        if (consistency != null) {
            internalRequest.setConsistency(WriteConsistencyLevel.fromString(consistency));
        }
    }

    /**
     * Yank a string array from a map. Emulates XContent's permissive String to
     * String array conversions.
     */
    private static String[] extractStringArray(Map<String, Object> source, String name) {
        Object value = source.remove(name);
        if (value == null) {
            return null;
        }
        if (value instanceof List) {
            @SuppressWarnings("unchecked")
            List<String> list = (List<String>) value;
            return list.toArray(new String[list.size()]);
        } else if (value instanceof String) {
            return new String[] {(String) value};
        } else {
            throw new IllegalArgumentException("Expected [" + name + "] to be a list or a string but was [" + value + ']');
        }
    }
}
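Assembled from the fields PARSER declares above, a reindex request body looks roughly like the following. This is a hedged illustration rather than an example shipped with this commit; the index names, the query, and the "proceed" conflicts value are assumptions:

    POST /_reindex
    {
      "source": { "index": "old_index", "type": "doc", "query": { "match_all": {} } },
      "dest":   { "index": "new_index", "versionType": "internal", "routing": "keep" },
      "size": 1000,
      "conflicts": "proceed"
    }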
@ -0,0 +1,113 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.search.RestSearchAction;
import org.elasticsearch.rest.action.support.RestActions;
import org.elasticsearch.script.Script;

import java.util.Map;

import static org.elasticsearch.plugin.reindex.AbstractBulkByScrollRequest.SIZE_ALL_MATCHES;
import static org.elasticsearch.plugin.reindex.RestReindexAction.parseCommon;
import static org.elasticsearch.plugin.reindex.UpdateByQueryAction.INSTANCE;
import static org.elasticsearch.rest.RestRequest.Method.POST;

public class RestUpdateByQueryAction extends BaseRestHandler {
    private IndicesQueriesRegistry indicesQueriesRegistry;

    @Inject
    public RestUpdateByQueryAction(Settings settings, RestController controller, Client client,
            IndicesQueriesRegistry indicesQueriesRegistry) {
        super(settings, controller, client);
        this.indicesQueriesRegistry = indicesQueriesRegistry;
        controller.registerHandler(POST, "/{index}/_update_by_query", this);
        controller.registerHandler(POST, "/{index}/{type}/_update_by_query", this);
    }

    @Override
    protected void handleRequest(RestRequest request, RestChannel channel, Client client) throws Exception {
        /*
         * Passing the search request through UpdateByQueryRequest first allows
         * it to set its own defaults which differ from SearchRequest's
         * defaults. Then the parse can override them.
         */
        UpdateByQueryRequest internalRequest = new UpdateByQueryRequest(new SearchRequest());
        int scrollSize = internalRequest.getSource().source().size();
        internalRequest.getSource().source().size(SIZE_ALL_MATCHES);
        /*
         * We can't send parseSearchRequest REST content that it doesn't
         * support, so we first strip out the fields that are valid here but
         * unknown to it. This is a temporary hack and should get better when
         * SearchRequest has full ObjectParser support; then we can simply
         * delegate.
         */
        BytesReference bodyContent = null;
        if (RestActions.hasBodyContent(request)) {
            bodyContent = RestActions.getRestContent(request);
            Tuple<XContentType, Map<String, Object>> body = XContentHelper.convertToMap(bodyContent, false);
            boolean modified = false;
            String conflicts = (String) body.v2().remove("conflicts");
            if (conflicts != null) {
                internalRequest.setConflicts(conflicts);
                modified = true;
            }
            @SuppressWarnings("unchecked")
            Map<String, Object> script = (Map<String, Object>) body.v2().remove("script");
            if (script != null) {
                internalRequest.setScript(Script.parse(script, false, parseFieldMatcher));
                modified = true;
            }
            if (modified) {
                XContentBuilder builder = XContentFactory.contentBuilder(body.v1());
                builder.map(body.v2());
                bodyContent = builder.bytes();
            }
        }
        RestSearchAction.parseSearchRequest(internalRequest.getSource(), indicesQueriesRegistry, request,
                parseFieldMatcher, bodyContent);

        String conflicts = request.param("conflicts");
        if (conflicts != null) {
            internalRequest.setConflicts(conflicts);
        }
        parseCommon(internalRequest, request);

        internalRequest.setSize(internalRequest.getSource().source().size());
        internalRequest.getSource().source().size(request.paramAsInt("scroll_size", scrollSize));

        client.execute(INSTANCE, internalRequest, new BulkIndexByScrollResponseContentListener<>(channel));
    }
}
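As registered above, update-by-query answers on POST /{index}/_update_by_query and takes an ordinary search body plus the extra conflicts and script keys that the handler strips out before delegating to parseSearchRequest. A hedged illustration; the index, the query, the script body, and the "proceed" value are assumptions, and the script language follows the node default:

    POST /twitter/_update_by_query?conflicts=proceed
    {
      "query":  { "term": { "user": "kimchy" } },
      "script": { "inline": "ctx._source.likes += 1" }
    }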
@ -0,0 +1,246 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.AutoCreateIndex;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.mapper.internal.TTLFieldMapper;
import org.elasticsearch.index.mapper.internal.VersionFieldMapper;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.Objects;

import static java.util.Objects.requireNonNull;
import static org.elasticsearch.index.VersionType.INTERNAL;

public class TransportReindexAction extends HandledTransportAction<ReindexRequest, ReindexResponse> {
    private final ClusterService clusterService;
    private final ScriptService scriptService;
    private final AutoCreateIndex autoCreateIndex;
    private final Client client;

    @Inject
    public TransportReindexAction(Settings settings, ThreadPool threadPool, ActionFilters actionFilters,
            IndexNameExpressionResolver indexNameExpressionResolver, ClusterService clusterService, ScriptService scriptService,
            AutoCreateIndex autoCreateIndex, Client client, TransportService transportService) {
        super(settings, ReindexAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
                ReindexRequest::new);
        this.clusterService = clusterService;
        this.scriptService = scriptService;
        this.autoCreateIndex = autoCreateIndex;
        this.client = client;
    }

    @Override
    protected void doExecute(ReindexRequest request, ActionListener<ReindexResponse> listener) {
        validateAgainstAliases(request.getSource(), request.getDestination(), indexNameExpressionResolver, autoCreateIndex,
                clusterService.state());
        new AsyncIndexBySearchAction(logger, scriptService, client, threadPool, request, listener).start();
    }

    /**
     * Throws an ActionRequestValidationException if the request tries to index
     * back into the same index or into an alias that points at two indexes.
     * This cannot be done during request validation because the cluster state
     * isn't available then. Package private for testing.
     */
    static String validateAgainstAliases(SearchRequest source, IndexRequest destination,
            IndexNameExpressionResolver indexNameExpressionResolver, AutoCreateIndex autoCreateIndex, ClusterState clusterState) {
        String target = destination.index();
        if (false == autoCreateIndex.shouldAutoCreate(target, clusterState)) {
            /*
             * If we're going to autocreate the index we don't need to resolve
             * it. This is the same sort of dance that TransportIndexRequest
             * uses to decide to autocreate the index.
             */
            target = indexNameExpressionResolver.concreteIndices(clusterState, destination)[0];
        }
        for (String sourceIndex: indexNameExpressionResolver.concreteIndices(clusterState, source)) {
            if (sourceIndex.equals(target)) {
                ActionRequestValidationException e = new ActionRequestValidationException();
                e.addValidationError("reindex cannot write into an index it's reading from [" + target + ']');
                throw e;
            }
        }
        return target;
    }

    /**
     * Simple implementation of reindex using scrolling and bulk. There are
     * tons of optimizations that can be done on certain types of reindex
     * requests but this makes no attempt to do any of them so it can be as
     * simple as possible.
     */
    static class AsyncIndexBySearchAction extends AbstractAsyncBulkIndexByScrollAction<ReindexRequest, ReindexResponse> {
        public AsyncIndexBySearchAction(ESLogger logger, ScriptService scriptService, Client client, ThreadPool threadPool,
                ReindexRequest request, ActionListener<ReindexResponse> listener) {
            super(logger, scriptService, client, threadPool, request, request.getSource(), listener);
        }

        @Override
        protected IndexRequest buildIndexRequest(SearchHit doc) {
            IndexRequest index = new IndexRequest(mainRequest.getDestination(), mainRequest);

            // We want the index from the copied request, not the doc.
            index.id(doc.id());
            if (index.type() == null) {
                /*
                 * Default to the doc's type if one isn't specified in the
                 * request so it's easy to do a scripted update.
                 */
                index.type(doc.type());
            }
            index.source(doc.sourceRef());
            /*
             * Internal versioning can just use what we copied from the
             * destination request. Otherwise we assume we're using external
             * versioning and use the doc's version.
             */
            if (index.versionType() != INTERNAL) {
                index.version(doc.version());
            }
            return index;
        }

        /**
         * Override the simple copy behavior to allow more fine grained control.
         */
        @Override
        protected void copyRouting(IndexRequest index, SearchHit doc) {
            String routingSpec = mainRequest.getDestination().routing();
            if (routingSpec == null) {
                super.copyRouting(index, doc);
                return;
            }
            if (routingSpec.startsWith("=")) {
                index.routing(mainRequest.getDestination().routing().substring(1));
                return;
            }
            switch (routingSpec) {
            case "keep":
                super.copyRouting(index, doc);
                break;
            case "discard":
                index.routing(null);
                break;
            default:
                throw new IllegalArgumentException("Unsupported routing command");
            }
        }

        /*
         * Methods below here handle the script updating the index request.
         * They try to be pretty liberal with regards to types because scripts
         * are often dynamically typed.
         */
        @Override
        protected ReindexResponse buildResponse(long took) {
            return new ReindexResponse(took, created(), updated(), batches(), versionConflicts(), noops(), indexingFailures(),
                    searchFailures());
        }

        @Override
        protected void scriptChangedIndex(IndexRequest index, Object to) {
            requireNonNull(to, "Can't reindex without a destination index!");
            index.index(to.toString());
        }

        @Override
        protected void scriptChangedType(IndexRequest index, Object to) {
            requireNonNull(to, "Can't reindex without a destination type!");
            index.type(to.toString());
        }

        @Override
        protected void scriptChangedId(IndexRequest index, Object to) {
            index.id(Objects.toString(to, null));
        }

        @Override
        protected void scriptChangedVersion(IndexRequest index, Object to) {
            if (to == null) {
                index.version(Versions.MATCH_ANY).versionType(INTERNAL);
                return;
            }
            index.version(asLong(to, VersionFieldMapper.NAME));
        }

        @Override
        protected void scriptChangedParent(IndexRequest index, Object to) {
            // Have to override routing with parent just in case it's changed.
            String routing = Objects.toString(to, null);
            index.parent(routing).routing(routing);
        }

        @Override
        protected void scriptChangedRouting(IndexRequest index, Object to) {
            index.routing(Objects.toString(to, null));
        }

        @Override
        protected void scriptChangedTimestamp(IndexRequest index, Object to) {
            index.timestamp(Objects.toString(to, null));
        }

        @Override
        protected void scriptChangedTTL(IndexRequest index, Object to) {
            if (to == null) {
                index.ttl(null);
                return;
            }
            index.ttl(asLong(to, TTLFieldMapper.NAME));
        }

        private long asLong(Object from, String name) {
            /*
             * Stuffing a number into the map will have converted it to
             * some Number.
             */
            Number fromNumber;
            try {
                fromNumber = (Number) from;
            } catch (ClassCastException e) {
                throw new IllegalArgumentException(name + " may only be set to an int or a long but was [" + from + "]", e);
            }
            long l = fromNumber.longValue();
            // Check that we didn't round when we fetched the value.
            if (fromNumber.doubleValue() != l) {
                throw new IllegalArgumentException(name + " may only be set to an int or a long but was [" + from + "]");
            }
            return l;
        }
    }
}
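The scriptChangedX hooks above fire when a reindex script writes document metadata into ctx; the key names mirror the mapper constants used elsewhere in this commit (_index, _type, _id, and so on). A hedged sketch of a script body they would react to (the "inline" wrapper and the syntax follow the node's default script language and are assumptions here):

    "script": { "inline": "ctx._index = 'archive'; ctx._version = null" }

Setting ctx._version to null takes scriptChangedVersion's null branch, which resets the copy to internal versioning with Versions.MATCH_ANY.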
@ -0,0 +1,131 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.mapper.internal.IdFieldMapper;
import org.elasticsearch.index.mapper.internal.IndexFieldMapper;
import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
import org.elasticsearch.index.mapper.internal.RoutingFieldMapper;
import org.elasticsearch.index.mapper.internal.TTLFieldMapper;
import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
import org.elasticsearch.index.mapper.internal.VersionFieldMapper;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

public class TransportUpdateByQueryAction extends HandledTransportAction<UpdateByQueryRequest, BulkIndexByScrollResponse> {
    private final Client client;
    private final ScriptService scriptService;

    @Inject
    public TransportUpdateByQueryAction(Settings settings, ThreadPool threadPool, ActionFilters actionFilters,
            IndexNameExpressionResolver indexNameExpressionResolver, Client client, TransportService transportService,
            ScriptService scriptService) {
        super(settings, UpdateByQueryAction.NAME, threadPool, transportService, actionFilters,
                indexNameExpressionResolver, UpdateByQueryRequest::new);
        this.client = client;
        this.scriptService = scriptService;
    }

    @Override
    protected void doExecute(UpdateByQueryRequest request,
            ActionListener<BulkIndexByScrollResponse> listener) {
        new AsyncIndexBySearchAction(logger, scriptService, client, threadPool, request, listener).start();
    }

    /**
     * Simple implementation of update-by-query using scrolling and bulk.
     */
    static class AsyncIndexBySearchAction extends AbstractAsyncBulkIndexByScrollAction<UpdateByQueryRequest, BulkIndexByScrollResponse> {
        public AsyncIndexBySearchAction(ESLogger logger, ScriptService scriptService, Client client, ThreadPool threadPool,
                UpdateByQueryRequest request, ActionListener<BulkIndexByScrollResponse> listener) {
            super(logger, scriptService, client, threadPool, request, request.getSource(), listener);
        }

        @Override
        protected IndexRequest buildIndexRequest(SearchHit doc) {
            IndexRequest index = new IndexRequest(mainRequest);
            // Write the doc back to exactly where it came from, guarded by
            // internal versioning so concurrent updates are detected.
            index.index(doc.index());
            index.type(doc.type());
            index.id(doc.id());
            index.source(doc.sourceRef());
            index.versionType(VersionType.INTERNAL);
            index.version(doc.version());
            return index;
        }

        @Override
        protected BulkIndexByScrollResponse buildResponse(long took) {
            return new BulkIndexByScrollResponse(took, updated(), batches(), versionConflicts(), noops(), indexingFailures(),
                    searchFailures());
        }

        @Override
        protected void scriptChangedIndex(IndexRequest index, Object to) {
            throw new IllegalArgumentException("Modifying [" + IndexFieldMapper.NAME + "] not allowed");
        }

        @Override
        protected void scriptChangedType(IndexRequest index, Object to) {
            throw new IllegalArgumentException("Modifying [" + TypeFieldMapper.NAME + "] not allowed");
        }

        @Override
        protected void scriptChangedId(IndexRequest index, Object to) {
            throw new IllegalArgumentException("Modifying [" + IdFieldMapper.NAME + "] not allowed");
        }

        @Override
        protected void scriptChangedVersion(IndexRequest index, Object to) {
            throw new IllegalArgumentException("Modifying [" + VersionFieldMapper.NAME + "] not allowed");
        }

        @Override
        protected void scriptChangedRouting(IndexRequest index, Object to) {
            throw new IllegalArgumentException("Modifying [" + RoutingFieldMapper.NAME + "] not allowed");
        }

        @Override
        protected void scriptChangedParent(IndexRequest index, Object to) {
            throw new IllegalArgumentException("Modifying [" + ParentFieldMapper.NAME + "] not allowed");
        }

        @Override
        protected void scriptChangedTimestamp(IndexRequest index, Object to) {
            throw new IllegalArgumentException("Modifying [" + TimestampFieldMapper.NAME + "] not allowed");
        }

        @Override
        protected void scriptChangedTTL(IndexRequest index, Object to) {
            throw new IllegalArgumentException("Modifying [" + TTLFieldMapper.NAME + "] not allowed");
        }
    }
}
@ -0,0 +1,43 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import org.elasticsearch.action.Action;
import org.elasticsearch.client.ElasticsearchClient;

public class UpdateByQueryAction extends
        Action<UpdateByQueryRequest, BulkIndexByScrollResponse, UpdateByQueryRequestBuilder> {
    public static final UpdateByQueryAction INSTANCE = new UpdateByQueryAction();
    public static final String NAME = "indices:data/write/update/byquery";

    private UpdateByQueryAction() {
        super(NAME);
    }

    @Override
    public UpdateByQueryRequestBuilder newRequestBuilder(ElasticsearchClient client) {
        return new UpdateByQueryRequestBuilder(client, this);
    }

    @Override
    public BulkIndexByScrollResponse newResponse() {
        return new BulkIndexByScrollResponse();
    }
}
@ -0,0 +1,48 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import org.elasticsearch.action.search.SearchRequest;

/**
 * Request to reindex a set of documents in place, without changing their
 * locations or IDs.
 */
public class UpdateByQueryRequest extends AbstractBulkIndexByScrollRequest<UpdateByQueryRequest> {
    public UpdateByQueryRequest() {
    }

    public UpdateByQueryRequest(SearchRequest search) {
        super(search);
    }

    @Override
    protected UpdateByQueryRequest self() {
        return this;
    }

    @Override
    public String toString() {
        StringBuilder b = new StringBuilder();
        b.append("update-by-query ");
        searchToString(b);
        return b.toString();
    }
}
@ -0,0 +1,51 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import org.elasticsearch.action.Action;
import org.elasticsearch.action.search.SearchAction;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;

public class UpdateByQueryRequestBuilder extends
        AbstractBulkIndexByScrollRequestBuilder<UpdateByQueryRequest, BulkIndexByScrollResponse, UpdateByQueryRequestBuilder> {

    public UpdateByQueryRequestBuilder(ElasticsearchClient client,
            Action<UpdateByQueryRequest, BulkIndexByScrollResponse, UpdateByQueryRequestBuilder> action) {
        this(client, action, new SearchRequestBuilder(client, SearchAction.INSTANCE));
    }

    private UpdateByQueryRequestBuilder(ElasticsearchClient client,
            Action<UpdateByQueryRequest, BulkIndexByScrollResponse, UpdateByQueryRequestBuilder> action,
            SearchRequestBuilder search) {
        super(client, action, search, new UpdateByQueryRequest(search.request()));
    }

    @Override
    protected UpdateByQueryRequestBuilder self() {
        return this;
    }

    @Override
    public UpdateByQueryRequestBuilder abortOnVersionConflict(boolean abortOnVersionConflict) {
        request.setAbortOnVersionConflict(abortOnVersionConflict);
        return this;
    }
}
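And the matching Java-side sketch for update-by-query, under the same assumption that the base builder exposes its SearchRequestBuilder via source(); the index name is hypothetical:

    UpdateByQueryRequestBuilder ubq = UpdateByQueryAction.INSTANCE.newRequestBuilder(client);
    ubq.source().setIndices("twitter");             // assumption: source() exposes the search builder
    ubq.abortOnVersionConflict(false);              // keep going when a doc changed mid-scroll
    BulkIndexByScrollResponse response = ubq.get(); // execute and block for the response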
@ -0,0 +1,65 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.text.StringText;
import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.search.SearchHitField;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.internal.InternalSearchHit;

import java.util.HashMap;
import java.util.Map;
import java.util.function.Consumer;

import static java.util.Collections.singletonMap;
import static org.hamcrest.Matchers.equalTo;

public abstract class AbstractAsyncBulkIndexByScrollActionScriptTestCase<Request extends AbstractBulkIndexByScrollRequest<Request>, Response extends BulkIndexByScrollResponse>
        extends AbstractAsyncBulkIndexByScrollActionTestCase<Request, Response> {
    protected IndexRequest applyScript(Consumer<Map<String, Object>> scriptBody) {
        IndexRequest index = new IndexRequest("index", "type", "1").source(singletonMap("foo", "bar"));
        Map<String, SearchHitField> fields = new HashMap<>();
        InternalSearchHit doc = new InternalSearchHit(0, "id", new StringText("type"), fields);
        doc.shardTarget(new SearchShardTarget("nodeid", "index", 1));
        ExecutableScript script = new SimpleExecutableScript(scriptBody);
        action().applyScript(index, doc, script, new HashMap<>());
        return index;
    }

    public void testScriptAddingJunkToCtxIsError() {
        try {
            applyScript((Map<String, Object> ctx) -> ctx.put("junk", "junk"));
            fail("Expected error");
        } catch (IllegalArgumentException e) {
            assertThat(e.getMessage(), equalTo("Invalid fields added to ctx [junk]"));
        }
    }

    public void testChangeSource() {
        IndexRequest index = applyScript((Map<String, Object> ctx) -> {
            @SuppressWarnings("unchecked")
            Map<String, Object> source = (Map<String, Object>) ctx.get("_source");
            source.put("bar", "cat");
        });
        assertEquals("cat", index.sourceAsMap().get("bar"));
    }
}
@ -0,0 +1,51 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.ThreadPool;
import org.junit.After;
import org.junit.Before;

public abstract class AbstractAsyncBulkIndexByScrollActionTestCase<Request extends AbstractBulkIndexByScrollRequest<Request>, Response extends BulkIndexByScrollResponse>
        extends ESTestCase {
    protected ThreadPool threadPool;

    @Before
    public void setupForTest() {
        threadPool = new ThreadPool(getTestName());
    }

    @After
    @Override
    public void tearDown() throws Exception {
        super.tearDown();
        threadPool.shutdown();
    }

    protected abstract AbstractAsyncBulkIndexByScrollAction<Request, Response> action();

    protected abstract Request request();

    protected PlainActionFuture<Response> listener() {
        return new PlainActionFuture<>();
    }
}
@ -0,0 +1,62 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.text.StringText;
import org.elasticsearch.index.mapper.internal.TTLFieldMapper;
import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.internal.InternalSearchHit;
import org.elasticsearch.search.internal.InternalSearchHitField;

import static java.util.Collections.singletonList;
import static java.util.Collections.singletonMap;

public abstract class AbstractAsyncBulkIndexbyScrollActionMetadataTestCase<Request extends AbstractBulkIndexByScrollRequest<Request>, Response extends BulkIndexByScrollResponse>
        extends AbstractAsyncBulkIndexByScrollActionTestCase<Request, Response> {

    /**
     * Create a doc with some metadata.
     *
     * @param field the metadata field to set
     * @param value the value to store in that field
     * @return a search hit carrying that metadata
     */
    protected InternalSearchHit doc(String field, Object value) {
        InternalSearchHit doc = new InternalSearchHit(0, "id", new StringText("type"), singletonMap(field,
                new InternalSearchHitField(field, singletonList(value))));
        doc.shardTarget(new SearchShardTarget("node", "shard", 0));
        // doc.sourceRef(new BytesArray("{\"foo\": \"bar\"}"));
        return doc;
    }

    public void testTimestampIsCopied() {
        IndexRequest index = new IndexRequest();
        action().copyMetadata(index, doc(TimestampFieldMapper.NAME, 10L));
        assertEquals("10", index.timestamp());
    }

    public void testTTL() throws Exception {
        IndexRequest index = new IndexRequest();
        action().copyMetadata(index, doc(TTLFieldMapper.NAME, 10L));
        assertEquals(10, index.ttl());
    }
}
@ -0,0 +1,112 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import org.hamcrest.Description;
import org.hamcrest.Matcher;
import org.hamcrest.TypeSafeMatcher;

import static org.hamcrest.Matchers.equalTo;

public abstract class AbstractBulkIndexByScrollResponseMatcher<Response extends BulkIndexByScrollResponse, Self extends AbstractBulkIndexByScrollResponseMatcher<Response, Self>>
        extends TypeSafeMatcher<Response> {
    private Matcher<Long> updatedMatcher = equalTo(0L);
    /**
     * Matcher for the number of batches. Optional.
     */
    private Matcher<Integer> batchesMatcher;
    private Matcher<Long> versionConflictsMatcher = equalTo(0L);
    private Matcher<Integer> failuresMatcher = equalTo(0);

    protected abstract Self self();

    public Self updated(Matcher<Long> updatedMatcher) {
        this.updatedMatcher = updatedMatcher;
        return self();
    }

    public Self updated(long updated) {
        return updated(equalTo(updated));
    }

    /**
     * Set the matcher for the number of batches. Defaults to matching any
     * integer because we usually don't care how many batches the job takes.
     */
    public Self batches(Matcher<Integer> batchesMatcher) {
        this.batchesMatcher = batchesMatcher;
        return self();
    }

    public Self batches(int batches) {
        return batches(equalTo(batches));
    }

    public Self batches(int total, int batchSize) {
        // Round up
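        // e.g. batches(100, 30) expects (100 + 29) / 30 = 4 batches.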
        return batches((total + batchSize - 1) / batchSize);
    }

    public Self versionConflicts(Matcher<Long> versionConflictsMatcher) {
        this.versionConflictsMatcher = versionConflictsMatcher;
        return self();
    }

    public Self versionConflicts(long versionConflicts) {
        return versionConflicts(equalTo(versionConflicts));
    }

    /**
     * Set the matcher for the size of the failures list. For more in-depth
     * matching do it by hand. The type signatures required to match the
     * actual failures list here just don't work.
     */
    public Self failures(Matcher<Integer> failuresMatcher) {
        this.failuresMatcher = failuresMatcher;
        return self();
    }

    /**
     * Set the expected size of the failures list.
     */
    public Self failures(int failures) {
        return failures(equalTo(failures));
    }

    @Override
    protected boolean matchesSafely(Response item) {
        return updatedMatcher.matches(item.getUpdated()) &&
                (batchesMatcher == null || batchesMatcher.matches(item.getBatches())) &&
                versionConflictsMatcher.matches(item.getVersionConflicts()) &&
                failuresMatcher.matches(item.getIndexingFailures().size());
    }

    @Override
    public void describeTo(Description description) {
        description.appendText("updated matches ").appendDescriptionOf(updatedMatcher);
        if (batchesMatcher != null) {
            description.appendText(" and batches matches ").appendDescriptionOf(batchesMatcher);
        }
        description.appendText(" and versionConflicts matches ").appendDescriptionOf(versionConflictsMatcher);
        description.appendText(" and failures size matches ").appendDescriptionOf(failuresMatcher);
    }
}
@ -0,0 +1,189 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.ClearScrollResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.FilterClient;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.client.NoOpClient;
import org.elasticsearch.threadpool.ThreadPool;
import org.junit.After;
import org.junit.Before;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;

import static org.apache.lucene.util.TestUtil.randomSimpleString;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.emptyCollectionOf;
import static org.hamcrest.Matchers.equalTo;

public class AsyncBulkByScrollActionTest extends ESTestCase {
    private MockClearScrollClient client;
    private ThreadPool threadPool;
    private DummyAbstractBulkByScrollRequest mainRequest;
    private SearchRequest firstSearchRequest;
    private PlainActionFuture<Object> listener;
    private String scrollId;

    @Before
    public void setupForTest() {
        client = new MockClearScrollClient(new NoOpClient(getTestName()));
        threadPool = new ThreadPool(getTestName());
        mainRequest = new DummyAbstractBulkByScrollRequest();
        firstSearchRequest = null;
        listener = new PlainActionFuture<>();
        scrollId = null;
    }

    @After
    public void tearDownAndVerifyCommonStuff() {
        client.close();
        threadPool.shutdown();
        if (scrollId != null) {
            assertThat(client.scrollsCleared, contains(scrollId));
        }
    }

    /**
     * Generates a random scrollId and registers it so that when the test
     * finishes we check that it was cleared. Subsequent calls reregister a new
     * random scroll id so it is checked instead.
     */
    private String scrollId() {
        scrollId = randomSimpleString(random());
        return scrollId;
    }

    /**
     * Mimics a ThreadPool rejecting execution of the task.
     */
    public void testThreadPoolRejectionsAbortRequest() throws Exception {
        threadPool.shutdown();
        threadPool = new ThreadPool(getTestName()) {
            @Override
            public Executor generic() {
                return new Executor() {
                    @Override
                    public void execute(Runnable command) {
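                        // Reject every task immediately instead of running it.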
                        ((AbstractRunnable) command).onRejection(new EsRejectedExecutionException("test"));
                    }
                };
            }
        };
        new DummyAbstractAsyncBulkByScrollAction()
                .onScrollResponse(new SearchResponse(null, scrollId(), 5, 4, randomLong(), null));
        try {
            listener.get();
            fail("Expected a failure");
        } catch (ExecutionException e) {
            assertThat(e.getMessage(), equalTo("EsRejectedExecutionException[test]"));
        }
    }

    /**
     * Mimics shard search failures usually caused by the data node serving the
     * scroll request going down.
     */
    public void testShardFailuresAbortRequest() throws Exception {
        ShardSearchFailure shardFailure = new ShardSearchFailure(new RuntimeException("test"));
        DummyAbstractAsyncBulkByScrollAction action = new DummyAbstractAsyncBulkByScrollAction();
        action.onScrollResponse(new SearchResponse(null, scrollId(), 5, 4, randomLong(), new ShardSearchFailure[] { shardFailure }));
        listener.get();
        assertThat(action.indexingFailures(), emptyCollectionOf(Failure.class));
        assertThat(action.searchFailures(), contains(shardFailure));
    }

    /**
     * Mimics bulk indexing failures.
     */
    public void testBulkFailuresAbortRequest() throws Exception {
        Failure failure = new Failure("index", "type", "id", new RuntimeException("test"));
        DummyAbstractAsyncBulkByScrollAction action = new DummyAbstractAsyncBulkByScrollAction();
        action.onBulkResponse(new BulkResponse(new BulkItemResponse[] {new BulkItemResponse(0, "index", failure)}, randomLong()));
        listener.get();
        assertThat(action.indexingFailures(), contains(failure));
        assertThat(action.searchFailures(), emptyCollectionOf(ShardSearchFailure.class));
    }

    private class DummyAbstractAsyncBulkByScrollAction extends AbstractAsyncBulkByScrollAction<DummyAbstractBulkByScrollRequest, Object> {
        public DummyAbstractAsyncBulkByScrollAction() {
            super(logger, client, threadPool, AsyncBulkByScrollActionTest.this.mainRequest, firstSearchRequest, listener);
        }

        @Override
        protected BulkRequest buildBulk(Iterable<SearchHit> docs) {
            return new BulkRequest();
        }

        @Override
        protected Object buildResponse(long took) {
            return new Object();
        }
    }

    private static class DummyAbstractBulkByScrollRequest extends AbstractBulkByScrollRequest<DummyAbstractBulkByScrollRequest> {
        @Override
        protected DummyAbstractBulkByScrollRequest self() {
            return this;
        }
    }

    private static class MockClearScrollClient extends FilterClient {
        private List<String> scrollsCleared = new ArrayList<>();

        public MockClearScrollClient(Client in) {
            super(in);
        }

        @Override
        @SuppressWarnings({ "rawtypes", "unchecked" }) // Declaration is raw
        protected <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(
                Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
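            // Intercept clear-scroll requests and record the scroll ids they
            // release; everything else falls through to the wrapped NoOpClient.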
            if (request instanceof ClearScrollRequest) {
                ClearScrollRequest clearScroll = (ClearScrollRequest) request;
                scrollsCleared.addAll(clearScroll.getScrollIds());
                listener.onResponse((Response) new ClearScrollResponse(true, clearScroll.getScrollIds().size()));
                return;
            }
            super.doExecute(action, request, listener);
        }
    }
}
@ -0,0 +1,123 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;

import java.util.ArrayList;
import java.util.List;

import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
import org.elasticsearch.action.index.IndexRequestBuilder;

public class ReindexBasicTests extends ReindexTestCase {
    public void testFiltering() throws Exception {
        indexRandom(true, client().prepareIndex("source", "test", "1").setSource("foo", "a"),
                client().prepareIndex("source", "test", "2").setSource("foo", "a"),
                client().prepareIndex("source", "test", "3").setSource("foo", "b"),
                client().prepareIndex("source", "test", "4").setSource("foo", "c"));
        assertHitCount(client().prepareSearch("source").setSize(0).get(), 4);

        // Copy all the docs
        ReindexRequestBuilder copy = reindex().source("source").destination("dest", "all").refresh(true);
        assertThat(copy.get(), responseMatcher().created(4));
        assertHitCount(client().prepareSearch("dest").setTypes("all").setSize(0).get(), 4);

        // Now none of them
        copy = reindex().source("source").destination("dest", "none").filter(termQuery("foo", "no_match")).refresh(true);
        assertThat(copy.get(), responseMatcher().created(0));
        assertHitCount(client().prepareSearch("dest").setTypes("none").setSize(0).get(), 0);

        // Now half of them
        copy = reindex().source("source").destination("dest", "half").filter(termQuery("foo", "a")).refresh(true);
        assertThat(copy.get(), responseMatcher().created(2));
        assertHitCount(client().prepareSearch("dest").setTypes("half").setSize(0).get(), 2);

        // Limit with size
        copy = reindex().source("source").destination("dest", "size_one").size(1).refresh(true);
        assertThat(copy.get(), responseMatcher().created(1));
        assertHitCount(client().prepareSearch("dest").setTypes("size_one").setSize(0).get(), 1);
    }

    public void testCopyMany() throws Exception {
        List<IndexRequestBuilder> docs = new ArrayList<>();
        int max = between(150, 500);
        for (int i = 0; i < max; i++) {
            docs.add(client().prepareIndex("source", "test", Integer.toString(i)).setSource("foo", "a"));
        }

        indexRandom(true, docs);
        assertHitCount(client().prepareSearch("source").setSize(0).get(), max);

        // Copy all the docs
        ReindexRequestBuilder copy = reindex().source("source").destination("dest", "all").refresh(true);
        // Use a small batch size so we have to use more than one batch
        copy.source().setSize(5);
        assertThat(copy.get(), responseMatcher().created(max).batches(max, 5));
        assertHitCount(client().prepareSearch("dest").setTypes("all").setSize(0).get(), max);

        // Copy some of the docs
        int half = max / 2;
        copy = reindex().source("source").destination("dest", "half").refresh(true);
        // Use a small batch size so we have to use more than one batch
        copy.source().setSize(5);
        copy.size(half); // The real "size" of the request: how many docs to copy in total.
        assertThat(copy.get(), responseMatcher().created(half).batches(half, 5));
        assertHitCount(client().prepareSearch("dest").setTypes("half").setSize(0).get(), half);
    }

    public void testRefreshIsFalseByDefault() throws Exception {
        refreshTestCase(null, false);
    }

    public void testRefreshFalseDoesntMakeVisible() throws Exception {
        refreshTestCase(false, false);
    }

    public void testRefreshTrueMakesVisible() throws Exception {
        refreshTestCase(true, true);
    }

    /**
     * Executes a reindex into an index with a refresh_interval of -1 and
     * checks that the documents are only visible when a refresh was requested.
     */
    private void refreshTestCase(Boolean refresh, boolean visible) throws Exception {
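        // Automatic refresh is disabled on the destination below, so the copied
        // docs only become visible if the reindex request itself refreshes.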
        CreateIndexRequestBuilder create = client().admin().indices().prepareCreate("dest").setSettings("refresh_interval", -1);
        assertAcked(create);
        ensureYellow();
        indexRandom(true, client().prepareIndex("source", "test", "1").setSource("foo", "a"),
                client().prepareIndex("source", "test", "2").setSource("foo", "a"),
                client().prepareIndex("source", "test", "3").setSource("foo", "b"),
                client().prepareIndex("source", "test", "4").setSource("foo", "c"));
        assertHitCount(client().prepareSearch("source").setSize(0).get(), 4);

        // Copy all the docs
        ReindexRequestBuilder copy = reindex().source("source").destination("dest", "all");
        if (refresh != null) {
            copy.refresh(refresh);
        }
        assertThat(copy.get(), responseMatcher().created(4));

        assertHitCount(client().prepareSearch("dest").setTypes("all").setSize(0).get(), visible ? 4 : 0);
    }
}
@ -0,0 +1,147 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
import org.elasticsearch.action.index.IndexRequestBuilder;

import static org.elasticsearch.action.index.IndexRequest.OpType.CREATE;
import static org.hamcrest.Matchers.both;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.either;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.lessThanOrEqualTo;

/**
 * Tests failure capturing and abort-on-failure behavior of index-by-search.
 */
public class ReindexFailureTests extends ReindexTestCase {
    public void testFailuresCauseAbortDefault() throws Exception {
        /*
         * Create the destination index such that the copy will cause a mapping
         * conflict on every request.
         */
        indexRandom(true,
                client().prepareIndex("dest", "test", "test").setSource("test", 10) /* It's a string in the source! */);

        indexDocs(100);

        ReindexRequestBuilder copy = reindex().source("source").destination("dest");
        /*
         * Set the search size to something very small to cause there to be
         * multiple batches for this request so we can assert that we abort on
         * the first batch.
         */
        copy.source().setSize(1);

        ReindexResponse response = copy.get();
        assertThat(response, responseMatcher()
                .batches(1)
                .failures(both(greaterThan(0)).and(lessThanOrEqualTo(maximumNumberOfShards()))));
        for (Failure failure: response.getIndexingFailures()) {
            assertThat(failure.getMessage(), containsString("NumberFormatException[For input string: \"words words\"]"));
        }
    }

    public void testAbortOnVersionConflict() throws Exception {
        // Just put something in the way of the copy.
        indexRandom(true,
                client().prepareIndex("dest", "test", "1").setSource("test", "test"));

        indexDocs(100);

        ReindexRequestBuilder copy = reindex().source("source").destination("dest").abortOnVersionConflict(true);
        // CREATE will cause the conflict to prevent the write.
        copy.destination().setOpType(CREATE);

        ReindexResponse response = copy.get();
        assertThat(response, responseMatcher().batches(1).versionConflicts(1).failures(1).created(99));
        for (Failure failure: response.getIndexingFailures()) {
            assertThat(failure.getMessage(), containsString("VersionConflictEngineException[[test]["));
        }
    }

    /**
     * Make sure that search failures get pushed back to the user as failures of
     * the whole process. We do lose some information about how far along the
     * process got, but it's important that they see these failures.
     */
    public void testResponseOnSearchFailure() throws Exception {
        /*
         * Attempt to trigger a reindex failure by deleting the source index out
         * from under it.
         */
        int attempt = 1;
        while (attempt < 5) {
            indexDocs(100);
            ReindexRequestBuilder copy = reindex().source("source").destination("dest");
            copy.source().setSize(10);
            Future<ReindexResponse> response = copy.execute();
            client().admin().indices().prepareDelete("source").get();

            try {
                response.get();
                logger.info("Didn't trigger a reindex failure on the {} attempt", attempt);
                attempt++;
            } catch (ExecutionException e) {
                logger.info("Triggered a reindex failure on the {} attempt", attempt);
                assertThat(e.getMessage(), either(containsString("all shards failed")).or(containsString("No search context found")));
                return;
            }
        }
        assumeFalse("Wasn't able to trigger a reindex failure in " + (attempt - 1) + " attempts.", true);
    }

    public void testSettingTtlIsValidationFailure() throws Exception {
        indexDocs(1);
        ReindexRequestBuilder copy = reindex().source("source").destination("dest");
        copy.destination().setTTL(123);
        try {
            copy.get();
            fail("Expected a validation failure");
        } catch (ActionRequestValidationException e) {
            assertThat(e.getMessage(), containsString("setting ttl on destination isn't supported. use scripts instead."));
        }
    }

    public void testSettingTimestampIsValidationFailure() throws Exception {
        indexDocs(1);
        ReindexRequestBuilder copy = reindex().source("source").destination("dest");
        copy.destination().setTimestamp("now");
        try {
            copy.get();
            fail("Expected a validation failure");
        } catch (ActionRequestValidationException e) {
            assertThat(e.getMessage(), containsString("setting timestamp on destination isn't supported. use scripts instead."));
        }
    }

    private void indexDocs(int count) throws Exception {
        List<IndexRequestBuilder> docs = new ArrayList<IndexRequestBuilder>(count);
        for (int i = 0; i < count; i++) {
            docs.add(client().prepareIndex("source", "test", Integer.toString(i)).setSource("test", "words words"));
        }
        indexRandom(true, docs);
    }
}
@ -0,0 +1,77 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.index.mapper.internal.RoutingFieldMapper;

/**
 * Index-by-search test for ttl, timestamp, and routing.
 */
public class ReindexMetadataTests extends AbstractAsyncBulkIndexbyScrollActionMetadataTestCase<ReindexRequest, ReindexResponse> {
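    // The destination's routing value acts as a directive: "keep" copies the
    // source routing, "discard" drops it, and a value starting with "=" sets
    // the routing to the rest of the string verbatim.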
    public void testRoutingCopiedByDefault() throws Exception {
        IndexRequest index = new IndexRequest();
        action().copyMetadata(index, doc(RoutingFieldMapper.NAME, "foo"));
        assertEquals("foo", index.routing());
    }

    public void testRoutingCopiedIfRequested() throws Exception {
        TransportReindexAction.AsyncIndexBySearchAction action = action();
        action.mainRequest.getDestination().routing("keep");
        IndexRequest index = new IndexRequest();
        action.copyMetadata(index, doc(RoutingFieldMapper.NAME, "foo"));
        assertEquals("foo", index.routing());
    }

    public void testRoutingDiscardedIfRequested() throws Exception {
        TransportReindexAction.AsyncIndexBySearchAction action = action();
        action.mainRequest.getDestination().routing("discard");
        IndexRequest index = new IndexRequest();
        action.copyMetadata(index, doc(RoutingFieldMapper.NAME, "foo"));
        assertEquals(null, index.routing());
    }

    public void testRoutingSetIfRequested() throws Exception {
        TransportReindexAction.AsyncIndexBySearchAction action = action();
        action.mainRequest.getDestination().routing("=cat");
        IndexRequest index = new IndexRequest();
        action.copyMetadata(index, doc(RoutingFieldMapper.NAME, "foo"));
        assertEquals("cat", index.routing());
    }

    public void testRoutingSetWithDegenerateValue() throws Exception {
        TransportReindexAction.AsyncIndexBySearchAction action = action();
        action.mainRequest.getDestination().routing("==]");
        IndexRequest index = new IndexRequest();
        action.copyMetadata(index, doc(RoutingFieldMapper.NAME, "foo"));
        assertEquals("=]", index.routing());
    }

    @Override
    protected TransportReindexAction.AsyncIndexBySearchAction action() {
        return new TransportReindexAction.AsyncIndexBySearchAction(logger, null, null, threadPool, request(), listener());
    }

    @Override
    protected ReindexRequest request() {
        return new ReindexRequest(new SearchRequest(), new IndexRequest());
    }
}
@ -0,0 +1,112 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import static org.elasticsearch.index.query.QueryBuilders.hasParentQuery;
import static org.elasticsearch.index.query.QueryBuilders.idsQuery;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;
import static org.hamcrest.Matchers.equalTo;

import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
import org.elasticsearch.index.query.QueryBuilder;

/**
 * Index-by-search tests for parent/child.
 */
public class ReindexParentChildTests extends ReindexTestCase {
    QueryBuilder<?> findsCountry;
    QueryBuilder<?> findsCity;
    QueryBuilder<?> findsNeighborhood;

    public void testParentChild() throws Exception {
        createParentChildIndex("source");
        createParentChildIndex("dest");
        createParentChildDocs("source");

        // Copy parent to the new index
        ReindexRequestBuilder copy = reindex().source("source").destination("dest").filter(findsCountry).refresh(true);
        assertThat(copy.get(), responseMatcher().created(1));

        // Copy the child to a new index
        copy = reindex().source("source").destination("dest").filter(findsCity).refresh(true);
        assertThat(copy.get(), responseMatcher().created(1));

        // Make sure parent/child is intact on that index
        assertSearchHits(client().prepareSearch("dest").setQuery(findsCity).get(), "pittsburgh");

        // Copy the grandchild to a new index
        copy = reindex().source("source").destination("dest").filter(findsNeighborhood).refresh(true);
        assertThat(copy.get(), responseMatcher().created(1));

        // Make sure parent/child is intact on that index
        assertSearchHits(client().prepareSearch("dest").setQuery(findsNeighborhood).get(),
                "make-believe");

        // Copy the parent/child/grandchild structure all at once to a third index
        createParentChildIndex("dest_all_at_once");
        copy = reindex().source("source").destination("dest_all_at_once").refresh(true);
        assertThat(copy.get(), responseMatcher().created(3));

        // Make sure parent/child/grandchild is intact there too
        assertSearchHits(client().prepareSearch("dest_all_at_once").setQuery(findsNeighborhood).get(),
                "make-believe");
    }

    public void testErrorMessageWhenBadParentChild() throws Exception {
        createParentChildIndex("source");
        createParentChildDocs("source");

        ReindexRequestBuilder copy = reindex().source("source").destination("dest").filter(findsCity);
        try {
            copy.get();
            fail("Expected exception");
        } catch (IllegalArgumentException e) {
            assertThat(e.getMessage(), equalTo("Can't specify parent if no parent field has been configured"));
        }
    }

    /**
     * Create a parent/child index with country, city, and neighborhood types.
     */
    private void createParentChildIndex(String indexName) throws Exception {
        CreateIndexRequestBuilder create = client().admin().indices().prepareCreate(indexName);
        create.addMapping("city", "{\"_parent\": {\"type\": \"country\"}}");
        create.addMapping("neighborhood", "{\"_parent\": {\"type\": \"city\"}}");
        assertAcked(create);
        ensureGreen();
    }

    private void createParentChildDocs(String indexName) throws Exception {
        indexRandom(true, client().prepareIndex(indexName, "country", "united states").setSource("foo", "bar"),
                client().prepareIndex(indexName, "city", "pittsburgh").setParent("united states").setSource("foo", "bar"),
                client().prepareIndex(indexName, "neighborhood", "make-believe").setParent("pittsburgh")
                        .setSource("foo", "bar").setRouting("united states"));
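        // The grandchild is explicitly routed to its grandparent so that all
        // three generations land on the same shard.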

        findsCountry = idsQuery("country").addIds("united states");
        findsCity = hasParentQuery("country", findsCountry);
        findsNeighborhood = hasParentQuery("city", findsCity);

        // Make sure we built the parent/child relationship
        assertSearchHits(client().prepareSearch(indexName).setQuery(findsCity).get(), "pittsburgh");
        assertSearchHits(client().prepareSearch(indexName).setQuery(findsNeighborhood).get(), "make-believe");
    }
}
@ -0,0 +1,47 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import com.carrotsearch.randomizedtesting.annotations.Name;
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;

import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.elasticsearch.test.rest.RestTestCandidate;
import org.elasticsearch.test.rest.parser.RestTestParseException;

import java.io.IOException;
import java.util.Collection;

public class ReindexRestIT extends ESRestTestCase {
    @Override
    protected Collection<Class<? extends Plugin>> nodePlugins() {
        return pluginList(ReindexPlugin.class);
    }

    public ReindexRestIT(@Name("yaml") RestTestCandidate testCandidate) {
        super(testCandidate);
    }

    @ParametersFactory
    public static Iterable<Object[]> parameters() throws IOException, RestTestParseException {
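        // (0, 1) requests shard 0 of 1, i.e. every REST test candidate runs in
        // this one suite instead of being split across runners.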
        return ESRestTestCase.createParameters(0, 1);
    }
}
@ -0,0 +1,111 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.support.AutoCreateIndex;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.AliasMetaData;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESTestCase;

import static org.hamcrest.Matchers.containsString;

/**
 * Tests that indexing from an index back into itself fails the request.
 */
public class ReindexSameIndexTests extends ESTestCase {
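    // Cluster state fixture: "target" is also reachable via "target_alias",
    // "target_multi" resolves to both "target" and "target2", and
    // "source_multi" resolves to "source" and "source2".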
    private static final ClusterState STATE = ClusterState.builder(new ClusterName("test")).metaData(MetaData.builder()
            .put(index("target", "target_alias", "target_multi"), true)
            .put(index("target2", "target_multi"), true)
            .put(index("foo"), true)
            .put(index("bar"), true)
            .put(index("baz"), true)
            .put(index("source", "source_multi"), true)
            .put(index("source2", "source_multi"), true)).build();
    private static final IndexNameExpressionResolver INDEX_NAME_EXPRESSION_RESOLVER = new IndexNameExpressionResolver(Settings.EMPTY);
    private static final AutoCreateIndex AUTO_CREATE_INDEX = new AutoCreateIndex(Settings.EMPTY, INDEX_NAME_EXPRESSION_RESOLVER);

    public void testObviousCases() throws Exception {
        fails("target", "target");
        fails("target", "foo", "bar", "target", "baz");
        fails("target", "foo", "bar", "target", "baz", "target");
        succeeds("target", "source");
        succeeds("target", "source", "source2");
    }

    public void testAliasesContainTarget() throws Exception {
        fails("target", "target_alias");
        fails("target_alias", "target");
        fails("target", "foo", "bar", "target_alias", "baz");
        fails("target_alias", "foo", "bar", "target_alias", "baz");
        fails("target_alias", "foo", "bar", "target", "baz");
        fails("target", "foo", "bar", "target_alias", "target_alias");
        fails("target", "target_multi");
        fails("target", "foo", "bar", "target_multi", "baz");
        succeeds("target", "source_multi");
        succeeds("target", "source", "source2", "source_multi");
    }

    public void testTargetIsAlias() throws Exception {
        try {
            succeeds("target_multi", "foo");
            fail("Expected failure");
        } catch (IllegalArgumentException e) {
            assertThat(e.getMessage(), containsString("Alias [target_multi] has more than one indices associated with it [["));
            // The index names can come in either order
            assertThat(e.getMessage(), containsString("target"));
            assertThat(e.getMessage(), containsString("target2"));
        }
    }

    private void fails(String target, String... sources) throws Exception {
        try {
            succeeds(target, sources);
            fail("Expected an exception");
        } catch (ActionRequestValidationException e) {
            assertThat(e.getMessage(),
                    containsString("reindex cannot write into an index its reading from [target]"));
        }
    }

    private void succeeds(String target, String... sources) throws Exception {
        TransportReindexAction.validateAgainstAliases(new SearchRequest(sources), new IndexRequest(target), INDEX_NAME_EXPRESSION_RESOLVER,
                AUTO_CREATE_INDEX, STATE);
    }

    private static IndexMetaData index(String name, String... aliases) {
        IndexMetaData.Builder builder = IndexMetaData.builder(name).settings(Settings.builder()
                .put("index.version.created", Version.CURRENT.id)
                .put("index.number_of_shards", 1)
                .put("index.number_of_replicas", 1));
        for (String alias: aliases) {
            builder.putAlias(AliasMetaData.builder(alias).build());
        }
        return builder.build();
    }
}
@ -0,0 +1,138 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.lucene.uid.Versions;

import java.util.Map;

import static org.hamcrest.Matchers.containsString;

/**
 * Tests index-by-search with a script modifying the documents.
 */
public class ReindexScriptTests extends AbstractAsyncBulkIndexByScrollActionScriptTestCase<ReindexRequest, ReindexResponse> {
    public void testSetIndex() throws Exception {
        Object dest = randomFrom(new Object[] {234, 234L, "pancake"});
        IndexRequest index = applyScript((Map<String, Object> ctx) -> ctx.put("_index", dest));
        assertEquals(dest.toString(), index.index());
    }

    public void testSettingIndexToNullIsError() throws Exception {
        try {
            applyScript((Map<String, Object> ctx) -> ctx.put("_index", null));
            fail("Expected a failure");
        } catch (NullPointerException e) {
            assertThat(e.getMessage(), containsString("Can't reindex without a destination index!"));
        }
    }

    public void testSetType() throws Exception {
        Object type = randomFrom(new Object[] {234, 234L, "pancake"});
        IndexRequest index = applyScript((Map<String, Object> ctx) -> ctx.put("_type", type));
        assertEquals(type.toString(), index.type());
    }

    public void testSettingTypeToNullIsError() throws Exception {
        try {
            applyScript((Map<String, Object> ctx) -> ctx.put("_type", null));
            fail("Expected a failure");
        } catch (NullPointerException e) {
            assertThat(e.getMessage(), containsString("Can't reindex without a destination type!"));
        }
    }

    public void testSetId() throws Exception {
        Object id = randomFrom(new Object[] {null, 234, 234L, "pancake"});
        IndexRequest index = applyScript((Map<String, Object> ctx) -> ctx.put("_id", id));
        if (id == null) {
            assertNull(index.id());
        } else {
            assertEquals(id.toString(), index.id());
        }
    }

    public void testSetVersion() throws Exception {
        Number version = randomFrom(new Number[] {null, 234, 234L});
        IndexRequest index = applyScript((Map<String, Object> ctx) -> ctx.put("_version", version));
        if (version == null) {
            assertEquals(Versions.MATCH_ANY, index.version());
        } else {
            assertEquals(version.longValue(), index.version());
        }
    }

    public void testSettingVersionToJunkIsAnError() throws Exception {
        Object junkVersion = randomFrom(new Object[] { "junk", Math.PI });
        try {
            applyScript((Map<String, Object> ctx) -> ctx.put("_version", junkVersion));
            fail("Expected a failure");
        } catch (IllegalArgumentException e) {
            assertThat(e.getMessage(), containsString("_version may only be set to an int or a long but was ["));
            assertThat(e.getMessage(), containsString(junkVersion.toString()));
        }
    }

    public void testSetParent() throws Exception {
        String parent = randomRealisticUnicodeOfLengthBetween(5, 20);
        IndexRequest index = applyScript((Map<String, Object> ctx) -> ctx.put("_parent", parent));
        assertEquals(parent, index.parent());
    }

    public void testSetRouting() throws Exception {
        String routing = randomRealisticUnicodeOfLengthBetween(5, 20);
        IndexRequest index = applyScript((Map<String, Object> ctx) -> ctx.put("_routing", routing));
        assertEquals(routing, index.routing());
    }

    public void testSetTimestamp() throws Exception {
        String timestamp = randomFrom(null, "now", "1234");
        IndexRequest index = applyScript((Map<String, Object> ctx) -> ctx.put("_timestamp", timestamp));
        assertEquals(timestamp, index.timestamp());
    }

    public void testSetTtl() throws Exception {
        Number ttl = randomFrom(new Number[] { null, 1233214, 134143797143L });
        IndexRequest index = applyScript((Map<String, Object> ctx) -> ctx.put("_ttl", ttl));
        if (ttl == null) {
            assertEquals(-1, index.ttl());
        } else {
            assertEquals(ttl.longValue(), index.ttl());
        }
    }

    public void testSettingTtlToJunkIsAnError() throws Exception {
        Object junkTtl = randomFrom(new Object[] { "junk", Math.PI });
        try {
            applyScript((Map<String, Object> ctx) -> ctx.put("_ttl", junkTtl));
            fail("Expected a failure");
        } catch (IllegalArgumentException e) {
            assertThat(e.getMessage(), containsString("_ttl may only be set to an int or a long but was ["));
            assertThat(e.getMessage(), containsString(junkTtl.toString()));
        }
    }

    @Override
    protected ReindexRequest request() {
        return new ReindexRequest();
    }

    @Override
    protected AbstractAsyncBulkIndexByScrollAction<ReindexRequest, ReindexResponse> action() {
        return new TransportReindexAction.AsyncIndexBySearchAction(logger, null, null, threadPool, request(), listener());
    }
}
@ -0,0 +1,77 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.hamcrest.Description;
import org.hamcrest.Matcher;

import java.util.Collection;

import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE;
import static org.hamcrest.Matchers.equalTo;

@ClusterScope(scope = SUITE, transportClientRatio = 0)
public abstract class ReindexTestCase extends ESIntegTestCase {
    @Override
    protected Collection<Class<? extends Plugin>> nodePlugins() {
        return pluginList(ReindexPlugin.class);
    }

    protected ReindexRequestBuilder reindex() {
        return ReindexAction.INSTANCE.newRequestBuilder(client());
    }

    public IndexBySearchResponseMatcher responseMatcher() {
        return new IndexBySearchResponseMatcher();
    }

    public static class IndexBySearchResponseMatcher
            extends AbstractBulkIndexByScrollResponseMatcher<ReindexResponse, IndexBySearchResponseMatcher> {
        private Matcher<Long> createdMatcher = equalTo(0L);

        public IndexBySearchResponseMatcher created(Matcher<Long> createdMatcher) {
            this.createdMatcher = createdMatcher;
            return this;
        }

        public IndexBySearchResponseMatcher created(long created) {
            return created(equalTo(created));
        }

        @Override
        protected boolean matchesSafely(ReindexResponse item) {
            return super.matchesSafely(item) && createdMatcher.matches(item.getCreated());
        }

        @Override
        public void describeTo(Description description) {
            super.describeTo(description);
            description.appendText(" and created matches ").appendDescriptionOf(createdMatcher);
        }

        @Override
        protected IndexBySearchResponseMatcher self() {
            return this;
        }
    }
}
@ -0,0 +1,143 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import org.elasticsearch.action.get.GetResponse;

import static org.elasticsearch.action.index.IndexRequest.OpType.CREATE;
import static org.elasticsearch.index.VersionType.EXTERNAL;
import static org.elasticsearch.index.VersionType.INTERNAL;

public class ReindexVersioningTests extends ReindexTestCase {
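    // The source doc always carries SOURCE_VERSION; when a dest doc exists it
    // is seeded with either OLDER_VERSION or NEWER_VERSION relative to it.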
    private static final int SOURCE_VERSION = 4;
    private static final int OLDER_VERSION = 1;
    private static final int NEWER_VERSION = 10;

    public void testExternalVersioningCreatesWhenAbsentAndSetsVersion() throws Exception {
        setupSourceAbsent();
        assertThat(reindexExternal(), responseMatcher().created(1));
        assertDest("source", SOURCE_VERSION);
    }

    public void testExternalVersioningUpdatesOnOlderAndSetsVersion() throws Exception {
        setupDestOlder();
        assertThat(reindexExternal(), responseMatcher().updated(1));
        assertDest("source", SOURCE_VERSION);
    }

    public void testExternalVersioningVersionConflictsOnNewer() throws Exception {
        setupDestNewer();
        assertThat(reindexExternal(), responseMatcher().versionConflicts(1));
        assertDest("dest", NEWER_VERSION);
    }

    public void testInternalVersioningCreatesWhenAbsent() throws Exception {
        setupSourceAbsent();
        assertThat(reindexInternal(), responseMatcher().created(1));
        assertDest("source", 1);
    }

    public void testInternalVersioningUpdatesOnOlder() throws Exception {
        setupDestOlder();
        assertThat(reindexInternal(), responseMatcher().updated(1));
        assertDest("source", OLDER_VERSION + 1);
    }

    public void testInternalVersioningUpdatesOnNewer() throws Exception {
        setupDestNewer();
        assertThat(reindexInternal(), responseMatcher().updated(1));
        assertDest("source", NEWER_VERSION + 1);
    }

    public void testCreateCreatesWhenAbsent() throws Exception {
        setupSourceAbsent();
        assertThat(reindexCreate(), responseMatcher().created(1));
        assertDest("source", 1);
    }

    public void testCreateVersionConflictsOnOlder() throws Exception {
        setupDestOlder();
        assertThat(reindexCreate(), responseMatcher().versionConflicts(1));
        assertDest("dest", OLDER_VERSION);
    }

    public void testCreateVersionConflictsOnNewer() throws Exception {
        setupDestNewer();
        assertThat(reindexCreate(), responseMatcher().versionConflicts(1));
        assertDest("dest", NEWER_VERSION);
    }

    /**
     * Perform a reindex with EXTERNAL versioning, which has "refresh" semantics:
     * the write only wins when the source version is newer than the destination's.
     */
    private ReindexResponse reindexExternal() {
        ReindexRequestBuilder reindex = reindex().source("source").destination("dest").abortOnVersionConflict(false);
        reindex.destination().setVersionType(EXTERNAL);
        return reindex.get();
    }

    /**
     * Perform a reindex with INTERNAL versioning, which has "overwrite" semantics.
     */
    private ReindexResponse reindexInternal() {
        ReindexRequestBuilder reindex = reindex().source("source").destination("dest").abortOnVersionConflict(false);
        reindex.destination().setVersionType(INTERNAL);
        return reindex.get();
    }

    /**
     * Perform a reindex with the CREATE OpType, which has "create only if absent" semantics.
     */
    private ReindexResponse reindexCreate() {
        ReindexRequestBuilder reindex = reindex().source("source").destination("dest").abortOnVersionConflict(false);
        reindex.destination().setOpType(CREATE);
        return reindex.get();
    }

    private void setupSourceAbsent() throws Exception {
        indexRandom(true, client().prepareIndex("source", "test", "test").setVersionType(EXTERNAL)
                .setVersion(SOURCE_VERSION).setSource("foo", "source"));

        assertEquals(SOURCE_VERSION, client().prepareGet("source", "test", "test").get().getVersion());
    }

    private void setupDest(int version) throws Exception {
        setupSourceAbsent();
        indexRandom(true, client().prepareIndex("dest", "test", "test").setVersionType(EXTERNAL)
                .setVersion(version).setSource("foo", "dest"));

        assertEquals(version, client().prepareGet("dest", "test", "test").get().getVersion());
    }

    private void setupDestOlder() throws Exception {
        setupDest(OLDER_VERSION);
    }

    private void setupDestNewer() throws Exception {
        setupDest(NEWER_VERSION);
    }

    private void assertDest(String fooValue, int version) {
        GetResponse get = client().prepareGet("dest", "test", "test").get();
        assertEquals(fooValue, get.getSource().get("foo"));
        assertEquals(version, get.getVersion());
    }
}
@ -0,0 +1,163 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import org.elasticsearch.action.WriteConsistencyLevel;
import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService.ScriptType;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;
import java.util.List;

import static java.util.Collections.emptyList;
import static java.util.Collections.emptyMap;
import static java.util.Collections.singletonList;
import static org.apache.lucene.util.TestUtil.randomSimpleString;

/**
 * Round trip tests for all Streamable things declared in this plugin.
 */
public class RoundTripTests extends ESTestCase {
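    // Each test builds a randomized instance, serializes it, reads the bytes
    // back into a fresh instance, and asserts the two agree field by field.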
    public void testReindexRequest() throws IOException {
        ReindexRequest reindex = new ReindexRequest(new SearchRequest(), new IndexRequest());
        randomRequest(reindex);
        reindex.getDestination().version(randomFrom(Versions.MATCH_ANY, Versions.MATCH_DELETED, 12L, 1L, 123124L, 12L));
        reindex.getDestination().index("test");
        ReindexRequest tripped = new ReindexRequest();
        roundTrip(reindex, tripped);
        assertRequestEquals(reindex, tripped);
        assertEquals(reindex.getDestination().version(), tripped.getDestination().version());
        assertEquals(reindex.getDestination().index(), tripped.getDestination().index());
    }

    public void testUpdateByQueryRequest() throws IOException {
        UpdateByQueryRequest update = new UpdateByQueryRequest(new SearchRequest());
        randomRequest(update);
        UpdateByQueryRequest tripped = new UpdateByQueryRequest();
        roundTrip(update, tripped);
        assertRequestEquals(update, tripped);
    }

    public void testReindexResponse() throws IOException {
        ReindexResponse response = new ReindexResponse(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
                randomPositiveInt(), randomPositiveLong(), randomPositiveLong(), randomIndexingFailures(), randomSearchFailures());
        ReindexResponse tripped = new ReindexResponse();
        roundTrip(response, tripped);
        assertResponseEquals(response, tripped);
        assertEquals(response.getCreated(), tripped.getCreated());
    }

    public void testBulkIndexByScrollResponse() throws IOException {
        BulkIndexByScrollResponse response = new BulkIndexByScrollResponse(randomPositiveLong(), randomPositiveLong(), randomPositiveInt(),
                randomPositiveLong(), randomPositiveLong(), randomIndexingFailures(), randomSearchFailures());
        BulkIndexByScrollResponse tripped = new BulkIndexByScrollResponse();
        roundTrip(response, tripped);
        assertResponseEquals(response, tripped);
    }

    private void randomRequest(AbstractBulkIndexByScrollRequest<?> request) {
        request.getSource().indices("test");
        request.getSource().source().size(between(1, 1000));
        request.setSize(random().nextBoolean() ? between(1, Integer.MAX_VALUE) : -1);
        request.setAbortOnVersionConflict(random().nextBoolean());
        request.setRefresh(rarely());
        request.setTimeout(TimeValue.parseTimeValue(randomTimeValue(), null, "test"));
        request.setConsistency(randomFrom(WriteConsistencyLevel.values()));
        request.setScript(random().nextBoolean() ? null : randomScript());
    }

    private void assertRequestEquals(AbstractBulkIndexByScrollRequest<?> request,
            AbstractBulkIndexByScrollRequest<?> tripped) {
        assertArrayEquals(request.getSource().indices(), tripped.getSource().indices());
        assertEquals(request.getSource().source().size(), tripped.getSource().source().size());
        assertEquals(request.isAbortOnVersionConflict(), tripped.isAbortOnVersionConflict());
        assertEquals(request.isRefresh(), tripped.isRefresh());
        assertEquals(request.getTimeout(), tripped.getTimeout());
        assertEquals(request.getConsistency(), tripped.getConsistency());
        assertEquals(request.getScript(), tripped.getScript());
    }

    private List<Failure> randomIndexingFailures() {
        return usually() ? emptyList()
                : singletonList(new Failure(randomSimpleString(random()), randomSimpleString(random()),
                        randomSimpleString(random()), new IllegalArgumentException("test")));
    }

    private List<ShardSearchFailure> randomSearchFailures() {
        return usually() ? emptyList()
                : singletonList(new ShardSearchFailure(randomSimpleString(random()), new SearchShardTarget(randomSimpleString(random()),
                        randomSimpleString(random()), randomInt()), randomFrom(RestStatus.values())));
|
||||
}
|
||||
|
||||
|
||||
private void assertResponseEquals(BulkIndexByScrollResponse response, BulkIndexByScrollResponse tripped) {
|
||||
assertEquals(response.getTook(), tripped.getTook());
|
||||
assertEquals(response.getUpdated(), tripped.getUpdated());
|
||||
assertEquals(response.getBatches(), tripped.getBatches());
|
||||
assertEquals(response.getVersionConflicts(), tripped.getVersionConflicts());
|
||||
assertEquals(response.getNoops(), tripped.getNoops());
|
||||
assertEquals(response.getIndexingFailures().size(), tripped.getIndexingFailures().size());
|
||||
for (int i = 0; i < response.getIndexingFailures().size(); i++) {
|
||||
Failure expected = response.getIndexingFailures().get(i);
|
||||
Failure actual = tripped.getIndexingFailures().get(i);
|
||||
assertEquals(expected.getIndex(), actual.getIndex());
|
||||
assertEquals(expected.getType(), actual.getType());
|
||||
assertEquals(expected.getId(), actual.getId());
|
||||
assertEquals(expected.getMessage(), actual.getMessage());
|
||||
assertEquals(expected.getStatus(), actual.getStatus());
|
||||
}
|
||||
}
|
||||
|
||||
private void roundTrip(Streamable example, Streamable empty) throws IOException {
|
||||
BytesStreamOutput out = new BytesStreamOutput();
|
||||
example.writeTo(out);
|
||||
empty.readFrom(out.bytes().streamInput());
|
||||
}
|
||||
|
||||
private Script randomScript() {
|
||||
return new Script(randomSimpleString(random()), // Name
|
||||
randomFrom(ScriptType.values()), // Type
|
||||
random().nextBoolean() ? null : randomSimpleString(random()), // Language
|
||||
emptyMap()); // Params
|
||||
}
|
||||
|
||||
private long randomPositiveLong() {
|
||||
long l;
|
||||
do {
|
||||
l = randomLong();
|
||||
} while (l < 0);
|
||||
return l;
|
||||
}
|
||||
|
||||
private int randomPositiveInt() {
|
||||
return randomInt(Integer.MAX_VALUE);
|
||||
}
|
||||
}
|
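The roundTrip helper above is the heart of these tests: serialize into a buffer, deserialize into an empty instance, then compare field by field. As a minimal sketch of the pattern (using only the BytesStreamOutput and Streamable APIs the test already imports; `original` and `copy` stand in for any request/response pair):

    // Write every field of `original` to an in-memory stream...
    BytesStreamOutput out = new BytesStreamOutput();
    original.writeTo(out);
    // ...then read them back, in the same order, into the empty `copy`.
    copy.readFrom(out.bytes().streamInput());
    // Assertions afterwards compare `original` and `copy` field by field.

A field that is written but never read (or read out of order) corrupts every field after it on the wire, which is why each new Streamable in the plugin gets a round-trip test here.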
@ -0,0 +1,55 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import org.elasticsearch.script.ExecutableScript;

import java.util.Map;
import java.util.function.Consumer;

public class SimpleExecutableScript implements ExecutableScript {
    private final Consumer<Map<String, Object>> script;
    private Map<String, Object> ctx;

    public SimpleExecutableScript(Consumer<Map<String, Object>> script) {
        this.script = script;
    }

    @Override
    public Object run() {
        script.accept(ctx);
        return null;
    }

    @Override
    @SuppressWarnings("unchecked")
    public void setNextVar(String name, Object value) {
        if ("ctx".equals(name)) {
            ctx = (Map<String, Object>) value;
        } else {
            throw new IllegalArgumentException("Unsupported var [" + name + "]");
        }
    }

    @Override
    public Object unwrap(Object value) {
        return value;
    }
}
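SimpleExecutableScript lets a test pass a plain lambda where the script service would normally supply a compiled script: setNextVar accepts only the "ctx" variable, and run simply hands that map to the consumer. A hedged usage sketch (the ctx map below is illustrative; in the real tests the harness builds it):

    // Hypothetical test usage: hand the script a ctx map, then run it.
    Map<String, Object> ctx = new HashMap<>();
    ctx.put("_id", "1");
    ExecutableScript script = new SimpleExecutableScript(c -> c.put("_id", "2"));
    script.setNextVar("ctx", ctx); // any name other than "ctx" throws
    script.run();                  // the lambda mutates the map in place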
@ -0,0 +1,107 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
import org.elasticsearch.search.sort.SortOrder;

import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;

public class UpdateByQueryBasicTests extends UpdateByQueryTestCase {
    public void testBasics() throws Exception {
        indexRandom(true, client().prepareIndex("test", "test", "1").setSource("foo", "a"),
                client().prepareIndex("test", "test", "2").setSource("foo", "a"),
                client().prepareIndex("test", "test", "3").setSource("foo", "b"),
                client().prepareIndex("test", "test", "4").setSource("foo", "c"));
        assertHitCount(client().prepareSearch("test").setTypes("test").setSize(0).get(), 4);
        assertEquals(1, client().prepareGet("test", "test", "1").get().getVersion());
        assertEquals(1, client().prepareGet("test", "test", "4").get().getVersion());

        // Update all the docs
        assertThat(request().source("test").refresh(true).get(), responseMatcher().updated(4));
        assertEquals(2, client().prepareGet("test", "test", "1").get().getVersion());
        assertEquals(2, client().prepareGet("test", "test", "4").get().getVersion());

        // Now none of them
        assertThat(request().source("test").filter(termQuery("foo", "no_match")).refresh(true).get(), responseMatcher().updated(0));
        assertEquals(2, client().prepareGet("test", "test", "1").get().getVersion());
        assertEquals(2, client().prepareGet("test", "test", "4").get().getVersion());

        // Now half of them
        assertThat(request().source("test").filter(termQuery("foo", "a")).refresh(true).get(), responseMatcher().updated(2));
        assertEquals(3, client().prepareGet("test", "test", "1").get().getVersion());
        assertEquals(3, client().prepareGet("test", "test", "2").get().getVersion());
        assertEquals(2, client().prepareGet("test", "test", "3").get().getVersion());
        assertEquals(2, client().prepareGet("test", "test", "4").get().getVersion());

        // Limit with size
        UpdateByQueryRequestBuilder request = request().source("test").size(3).refresh(true);
        request.source().addSort("foo", SortOrder.ASC);
        assertThat(request.get(), responseMatcher().updated(3));
        // Only the first three documents are updated because of the sort
        assertEquals(4, client().prepareGet("test", "test", "1").get().getVersion());
        assertEquals(4, client().prepareGet("test", "test", "2").get().getVersion());
        assertEquals(3, client().prepareGet("test", "test", "3").get().getVersion());
        assertEquals(2, client().prepareGet("test", "test", "4").get().getVersion());
    }

    public void testRefreshIsFalseByDefault() throws Exception {
        refreshTestCase(null, false);
    }

    public void testRefreshFalseDoesntMakeVisible() throws Exception {
        refreshTestCase(false, false);
    }

    public void testRefreshTrueMakesVisible() throws Exception {
        refreshTestCase(true, true);
    }

    /**
     * Executes an update_by_query on an index with a refresh_interval of -1
     * and checks whether the updated documents become visible to search.
     */
    private void refreshTestCase(Boolean refresh, boolean visible) throws Exception {
        CreateIndexRequestBuilder create = client().admin().indices().prepareCreate("test").setSettings("refresh_interval", -1);
        create.addMapping("test", "{\"dynamic\": \"false\"}");
        assertAcked(create);
        ensureYellow();
        indexRandom(true, client().prepareIndex("test", "test", "1").setSource("foo", "a"),
                client().prepareIndex("test", "test", "2").setSource("foo", "a"),
                client().prepareIndex("test", "test", "3").setSource("foo", "b"),
                client().prepareIndex("test", "test", "4").setSource("foo", "c"));
        assertHitCount(client().prepareSearch("test").setQuery(matchQuery("foo", "a")).setSize(0).get(), 0);

        // Now make foo searchable
        assertAcked(client().admin().indices().preparePutMapping("test").setType("test")
                .setSource("{\"test\": {\"properties\":{\"foo\": {\"type\": \"string\"}}}}"));
        UpdateByQueryRequestBuilder update = request().source("test");
        if (refresh != null) {
            update.refresh(refresh);
        }
        assertThat(update.get(), responseMatcher().updated(4));

        assertHitCount(client().prepareSearch("test").setQuery(matchQuery("foo", "a")).setSize(0).get(), visible ? 2 : 0);
    }
}
@ -0,0 +1,43 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.index.mapper.internal.RoutingFieldMapper;

public class UpdateByQueryMetadataTests
        extends AbstractAsyncBulkIndexbyScrollActionMetadataTestCase<UpdateByQueryRequest, BulkIndexByScrollResponse> {
    public void testRoutingIsCopied() throws Exception {
        IndexRequest index = new IndexRequest();
        action().copyMetadata(index, doc(RoutingFieldMapper.NAME, "foo"));
        assertEquals("foo", index.routing());
    }

    @Override
    protected TransportUpdateByQueryAction.AsyncIndexBySearchAction action() {
        return new TransportUpdateByQueryAction.AsyncIndexBySearchAction(logger, null, null, threadPool, request(), listener());
    }

    @Override
    protected UpdateByQueryRequest request() {
        return new UpdateByQueryRequest(new SearchRequest());
    }
}
@ -0,0 +1,52 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;

import java.util.Collection;

import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE;

@ClusterScope(scope = SUITE, transportClientRatio = 0)
public abstract class UpdateByQueryTestCase extends ESIntegTestCase {
    @Override
    protected Collection<Class<? extends Plugin>> nodePlugins() {
        return pluginList(ReindexPlugin.class);
    }

    protected UpdateByQueryRequestBuilder request() {
        return UpdateByQueryAction.INSTANCE.newRequestBuilder(client());
    }

    public BulkIndexbyScrollResponseMatcher responseMatcher() {
        return new BulkIndexbyScrollResponseMatcher();
    }

    public static class BulkIndexbyScrollResponseMatcher extends
            AbstractBulkIndexByScrollResponseMatcher<BulkIndexByScrollResponse, BulkIndexbyScrollResponseMatcher> {
        @Override
        protected BulkIndexbyScrollResponseMatcher self() {
            return this;
        }
    }
}
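The nested matcher above exists only to pin down the generics: AbstractBulkIndexByScrollResponseMatcher is self-typed so that fluent setters on the abstract class can return the concrete subclass. A minimal sketch of that pattern with hypothetical names (none of these types are part of the plugin):

    // Self-typed ("curiously recurring") base class for fluent builders.
    abstract class AbstractMatcher<Self extends AbstractMatcher<Self>> {
        long updated;
        Self updated(long updated) {
            this.updated = updated;
            return self(); // returns the concrete subclass, keeping the chain typed
        }
        abstract Self self();
    }

    class ConcreteMatcher extends AbstractMatcher<ConcreteMatcher> {
        @Override
        ConcreteMatcher self() {
            return this;
        }
    }

Each concrete matcher only has to implement self(); every inherited setter then chains with the right static type.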
@ -0,0 +1,98 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.index.engine.VersionConflictEngineException;

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;

import static org.apache.lucene.util.TestUtil.randomSimpleString;
import static org.hamcrest.Matchers.either;
import static org.hamcrest.Matchers.equalTo;

/**
 * Mutates a document while update-by-query-ing it and asserts that the mutation
 * always sticks. Update-by-query should never revert documents.
 */
public class UpdateByQueryWhileModifyingTests extends UpdateByQueryTestCase {
    private static final int MAX_MUTATIONS = 50;
    private static final int MAX_ATTEMPTS = 10;

    public void testUpdateWhileReindexing() throws Exception {
        AtomicReference<String> value = new AtomicReference<>(randomSimpleString(random()));
        indexRandom(true, client().prepareIndex("test", "test", "test").setSource("test", value.get()));

        AtomicReference<Throwable> failure = new AtomicReference<>();
        AtomicBoolean keepUpdating = new AtomicBoolean(true);
        Thread updater = new Thread(() -> {
            while (keepUpdating.get()) {
                try {
                    assertThat(request().source("test").refresh(true).abortOnVersionConflict(false).get(), responseMatcher()
                            .updated(either(equalTo(0L)).or(equalTo(1L))).versionConflicts(either(equalTo(0L)).or(equalTo(1L))));
                } catch (Throwable t) {
                    failure.set(t);
                }
            }
        });
        updater.start();

        try {
            for (int i = 0; i < MAX_MUTATIONS; i++) {
                GetResponse get = client().prepareGet("test", "test", "test").get();
                assertEquals(value.get(), get.getSource().get("test"));
                value.set(randomSimpleString(random()));
                IndexRequestBuilder index = client().prepareIndex("test", "test", "test").setSource("test", value.get())
                        .setRefresh(true);
                /*
                 * Update-by-query increments the version number, so concurrent
                 * indexing can hit version conflict exceptions; we just
                 * blindly retry.
                 */
                int attempts = 0;
                while (true) {
                    attempts++;
                    try {
                        index.setVersion(get.getVersion()).get();
                        break;
                    } catch (VersionConflictEngineException e) {
                        if (attempts >= MAX_ATTEMPTS) {
                            throw new RuntimeException("Failed to index after [" + MAX_ATTEMPTS + "] attempts. Too many version conflicts!");
                        }
                        logger.info(
                                "Caught expected version conflict trying to perform mutation number {} with version {}. Retrying.",
                                i, get.getVersion());
                        get = client().prepareGet("test", "test", "test").get();
                    }
                }
            }
        } finally {
            keepUpdating.set(false);
            updater.join(TimeUnit.SECONDS.toMillis(10));
            if (failure.get() != null) {
                throw new RuntimeException(failure.get());
            }
        }
    }
}
@ -0,0 +1,55 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.reindex;

import java.util.Date;
import java.util.Map;

import static org.hamcrest.Matchers.containsString;

public class UpdateByQueryWithScriptTests
        extends AbstractAsyncBulkIndexByScrollActionScriptTestCase<UpdateByQueryRequest, BulkIndexByScrollResponse> {
    public void testModifyingCtxNotAllowed() {
        /*
         * It's important that none of these actually match any of the fields.
         * They don't now, but make sure they still don't match if you add any
         * more. The point of having many is that they should all present the
         * same error message to the user, not some ClassCastException.
         */
        Object[] options = new Object[] {"cat", new Object(), 123, new Date(), Math.PI};
        for (String ctxVar: new String[] {"_index", "_type", "_id", "_version", "_parent", "_routing", "_timestamp", "_ttl"}) {
            try {
                applyScript((Map<String, Object> ctx) -> ctx.put(ctxVar, randomFrom(options)));
            } catch (IllegalArgumentException e) {
                assertThat(e.getMessage(), containsString("Modifying [" + ctxVar + "] not allowed"));
            }
        }
    }

    @Override
    protected UpdateByQueryRequest request() {
        return new UpdateByQueryRequest();
    }

    @Override
    protected AbstractAsyncBulkIndexByScrollAction<UpdateByQueryRequest, BulkIndexByScrollResponse> action() {
        return new TransportUpdateByQueryAction.AsyncIndexBySearchAction(logger, null, null, threadPool, request(), listener());
    }
}
@ -0,0 +1,30 @@
{
  "reindex": {
    "documentation": "https://www.elastic.co/guide/en/elasticsearch/plugins/master/plugins-reindex.html",
    "methods": ["POST"],
    "url": {
      "path": "/_reindex",
      "paths": ["/_reindex"],
      "parts": {},
      "params": {
        "refresh": {
          "type" : "boolean",
          "description" : "Should the affected indexes be refreshed?"
        },
        "timeout": {
          "type" : "time",
          "default": "1m",
          "description" : "Time each individual bulk request should wait for shards that are unavailable."
        },
        "consistency": {
          "type" : "enum",
          "options" : ["one", "quorum", "all"],
          "description" : "Explicit write consistency setting for the operation"
        }
      }
    },
    "body": {
      "description": "The search definition using the Query DSL and the prototype for the index request."
    }
  }
}
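The REST spec above maps directly onto the Java request the round-trip tests build. A minimal sketch of the equivalent of POST /_reindex with body {"source": {"index": "src"}, "dest": {"index": "dest"}}, assuming the request types shown elsewhere in this commit:

    // Java equivalent of the simplest _reindex body.
    ReindexRequest reindex = new ReindexRequest(new SearchRequest(), new IndexRequest());
    reindex.getSource().indices("src");     // body.source.index
    reindex.getDestination().index("dest"); // body.dest.index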
@ -0,0 +1,195 @@
{
  "update-by-query": {
    "documentation": "https://www.elastic.co/guide/en/elasticsearch/plugins/master/plugins-reindex.html",
    "methods": ["POST"],
    "url": {
      "path": "/{index}/_update_by_query",
      "paths": ["/{index}/_update_by_query", "/{index}/{type}/_update_by_query"],
      "comment": "most things below this are just copied from search.json",
      "parts": {
        "index": {
          "type" : "list",
          "description" : "A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices"
        },
        "type": {
          "type" : "list",
          "description" : "A comma-separated list of document types to search; leave empty to perform the operation on all types"
        }
      },
      "params": {
        "analyzer": {
          "type" : "string",
          "description" : "The analyzer to use for the query string"
        },
        "analyze_wildcard": {
          "type" : "boolean",
          "description" : "Specify whether wildcard and prefix queries should be analyzed (default: false)"
        },
        "default_operator": {
          "type" : "enum",
          "options" : ["AND","OR"],
          "default" : "OR",
          "description" : "The default operator for query string query (AND or OR)"
        },
        "df": {
          "type" : "string",
          "description" : "The field to use as default where no field prefix is given in the query string"
        },
        "explain": {
          "type" : "boolean",
          "description" : "Specify whether to return detailed information about score computation as part of a hit"
        },
        "fields": {
          "type" : "list",
          "description" : "A comma-separated list of fields to return as part of a hit"
        },
        "fielddata_fields": {
          "type" : "list",
          "description" : "A comma-separated list of fields to return as the field data representation of a field for each hit"
        },
        "from": {
          "type" : "number",
          "description" : "Starting offset (default: 0)"
        },
        "ignore_unavailable": {
          "type" : "boolean",
          "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
        },
        "allow_no_indices": {
          "type" : "boolean",
          "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
        },
        "conflicts": {
          "note": "This is not copied from search",
          "type" : "enum",
          "options": ["abort", "proceed"],
          "default": "abort",
          "description" : "What to do when the update by query hits version conflicts"
        },
        "expand_wildcards": {
          "type" : "enum",
          "options" : ["open","closed","none","all"],
          "default" : "open",
          "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
        },
        "lenient": {
          "type" : "boolean",
          "description" : "Specify whether format-based query failures (such as providing text to a numeric field) should be ignored"
        },
        "lowercase_expanded_terms": {
          "type" : "boolean",
          "description" : "Specify whether query terms should be lowercased"
        },
        "preference": {
          "type" : "string",
          "description" : "Specify the node or shard the operation should be performed on (default: random)"
        },
        "q": {
          "type" : "string",
          "description" : "Query in the Lucene query string syntax"
        },
        "routing": {
          "type" : "list",
          "description" : "A comma-separated list of specific routing values"
        },
        "scroll": {
          "type" : "duration",
          "description" : "Specify how long a consistent view of the index should be maintained for scrolled search"
        },
        "search_type": {
          "type" : "enum",
          "options" : ["query_then_fetch", "dfs_query_then_fetch"],
          "description" : "Search operation type"
        },
        "size": {
          "type" : "number",
          "description" : "Number of hits to return (default: 10)"
        },
        "sort": {
          "type" : "list",
          "description" : "A comma-separated list of <field>:<direction> pairs"
        },
        "_source": {
          "type" : "list",
          "description" : "True or false to return the _source field or not, or a list of fields to return"
        },
        "_source_exclude": {
          "type" : "list",
          "description" : "A list of fields to exclude from the returned _source field"
        },
        "_source_include": {
          "type" : "list",
          "description" : "A list of fields to extract and return from the _source field"
        },
        "terminate_after": {
          "type" : "number",
          "description" : "The maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early."
        },
        "stats": {
          "type" : "list",
          "description" : "Specific 'tag' of the request for logging and statistical purposes"
        },
        "suggest_field": {
          "type" : "string",
          "description" : "Specify which field to use for suggestions"
        },
        "suggest_mode": {
          "type" : "enum",
          "options" : ["missing", "popular", "always"],
          "default" : "missing",
          "description" : "Specify suggest mode"
        },
        "suggest_size": {
          "type" : "number",
          "description" : "How many suggestions to return in response"
        },
        "suggest_text": {
          "type" : "text",
          "description" : "The source text for which the suggestions should be returned"
        },
        "track_scores": {
          "type" : "boolean",
          "description": "Whether to calculate and return scores even if they are not used for sorting"
        },
        "version": {
          "type" : "boolean",
          "description" : "Specify whether to return document version as part of a hit"
        },
        "version_type": {
          "type" : "boolean",
          "description" : "Should the document increment the version number (internal) on hit or not (reindex)"
        },
        "request_cache": {
          "type" : "boolean",
          "description" : "Specify if request cache should be used for this request or not, defaults to index level setting"
        },
        "refresh": {
          "type" : "boolean",
          "description" : "Should the affected indexes be refreshed?"
        },
        "timeout": {
          "type" : "time",
          "default": "1m",
          "description" : "Time each individual bulk request should wait for shards that are unavailable."
        },
        "consistency": {
          "type" : "enum",
          "options" : ["one", "quorum", "all"],
          "description" : "Explicit write consistency setting for the operation"
        },
        "scroll_size": {
          "type": "integer",
          "default_value": 100,
          "description": "Size on the scroll request powering the update-by-query"
        }
      }
    },
    "body": {
      "description": "The search definition using the Query DSL"
    }
  }
}
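As with reindex, these parameters correspond to the request builder the integration tests use. A hedged sketch of the Java side of POST /test/_update_by_query with refresh=true and size=1, assuming a client() as in the tests:

    // Java equivalent of a size-limited _update_by_query with refresh.
    UpdateByQueryRequestBuilder update = UpdateByQueryAction.INSTANCE.newRequestBuilder(client());
    update.source("test").size(1).refresh(true); // index, doc limit, refresh flag
    BulkIndexByScrollResponse response = update.get();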
@ -0,0 +1,333 @@
---
"Response format for created":
  - do:
      index:
        index: source
        type: foo
        id: 1
        body: { "text": "test" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        body:
          source:
            index: source
          dest:
            index: dest
  - match: {created: 1}
  - match: {updated: 0}
  - match: {version_conflicts: 0}
  - match: {batches: 1}
  - match: {failures: []}
  - is_true: took

---
"Response format for updated":
  - do:
      index:
        index: source
        type: foo
        id: 1
        body: { "text": "test" }
  - do:
      index:
        index: dest
        type: foo
        id: 1
        body: { "text": "test" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        body:
          source:
            index: source
          dest:
            index: dest
  - match: {created: 0}
  - match: {updated: 1}
  - match: {version_conflicts: 0}
  - match: {batches: 1}
  - match: {failures: []}
  - is_true: took

---
"Response format for version conflict":
  - do:
      index:
        index: source
        type: foo
        id: 1
        body: { "text": "test" }
  - do:
      index:
        index: dest
        type: foo
        id: 1
        body: { "text": "test" }
  - do:
      indices.refresh: {}

  - do:
      catch: conflict
      reindex:
        body:
          source:
            index: source
          dest:
            index: dest
            op_type: create
  - match: {created: 0}
  - match: {updated: 0}
  - match: {version_conflicts: 1}
  - match: {batches: 1}
  - match: {failures.0.index: dest}
  - match: {failures.0.type: foo}
  - match: {failures.0.id: "1"}
  - match: {failures.0.status: 409}
  - match: {failures.0.cause.type: version_conflict_engine_exception}
  - match: {failures.0.cause.reason: "[foo][1]: version conflict, document already exists (current version [1])"}
  - match: {failures.0.cause.shard: /\d+/}
  - match: {failures.0.cause.index: dest}
  - is_true: took

---
"Response format for version conflict with conflicts=proceed":
  - do:
      index:
        index: source
        type: foo
        id: 1
        body: { "text": "test" }
  - do:
      index:
        index: dest
        type: foo
        id: 1
        body: { "text": "test" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        body:
          conflicts: proceed
          source:
            index: source
          dest:
            index: dest
            op_type: create
  - match: {created: 0}
  - match: {updated: 0}
  - match: {version_conflicts: 1}
  - match: {batches: 1}
  - match: {failures: []}
  - is_true: took

---
"Simplest example in docs":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: twitter
          dest:
            index: new_twitter

  - do:
      search:
        index: new_twitter
  - match: { hits.total: 1 }

---
"Limit by type example in docs":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      index:
        index: twitter
        type: junk
        id: 1
        body: { "user": "kimchy" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: twitter
            type: tweet
          dest:
            index: new_twitter

  - do:
      search:
        index: new_twitter
  - match: { hits.total: 1 }

---
"Limit by query example in docs":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      index:
        index: twitter
        type: tweet
        id: 2
        body: { "user": "junk" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: twitter
            query:
              match:
                user: kimchy
          dest:
            index: new_twitter

  - do:
      search:
        index: new_twitter
  - match: { hits.total: 1 }

---
"Override type example in docs":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      index:
        index: twitter
        type: junk
        id: 1
        body: { "user": "kimchy" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: twitter
            type: tweet
          dest:
            index: new_twitter
            type: chirp

  - do:
      search:
        index: new_twitter
        type: chirp
  - match: { hits.total: 1 }

---
"Multi index, multi type example from docs":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      index:
        index: blog
        type: post
        id: 1
        body: { "user": "kimchy" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: [twitter, blog]
            type: [tweet, post]
          dest:
            index: all_together

  - do:
      search:
        index: all_together
        type: tweet
        body:
          query:
            match:
              user: kimchy
  - match: { hits.total: 1 }

  - do:
      search:
        index: all_together
        type: post
        body:
          query:
            match:
              user: kimchy
  - match: { hits.total: 1 }

---
"Limit by size example from docs":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      index:
        index: twitter
        type: tweet
        id: 2
        body: { "user": "kimchy" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          size: 1
          source:
            index: twitter
          dest:
            index: new_twitter

  - do:
      search:
        index: new_twitter
        type: tweet
  - match: { hits.total: 1 }
@ -0,0 +1,150 @@
---
"no body fails":
  - do:
      catch: /body required/
      reindex: {}

---
"junk in body fails":
  - do:
      catch: /unknown field \[junk\]/
      reindex:
        body:
          junk:
            more_junk:

---
"junk in source fails":
  - do:
      catch: /Unknown key for a START_OBJECT in \[junk\]./
      reindex:
        body:
          source:
            junk: {}

---
"junk in dest fails":
  - do:
      catch: /unknown field \[junk\]/
      reindex:
        body:
          dest:
            junk: {}

---
"no index on destination fails":
  - do:
      catch: /index must be specified/
      reindex:
        body:
          dest: {}

---
"source size is accepted":
  - do:
      index:
        index: source
        type: foo
        id: 1
        body: { "text": "test" }
  - do:
      reindex:
        body:
          source:
            index: source
            size: 1000
          dest:
            index: dest

---
"search size fails if not a number":
  - do:
      catch: '/NumberFormatException: For input string: "cat"/'
      reindex:
        body:
          source:
            size: cat
          dest:
            index: dest

---
"search from is not supported":
  - do:
      catch: /from is not supported in this context/
      reindex:
        body:
          source:
            from: 1
          dest:
            index: dest

---
"overwriting version is not supported":
  - do:
      catch: /.*\[dest\] unknown field \[version\].*/
      reindex:
        body:
          dest:
            version: 10

---
"bad conflicts is error":
  - do:
      catch: /.*conflicts may only be "proceed" or "abort" but was \[cat\]/
      reindex:
        body:
          conflicts: cat

---
"invalid size fails":
  - do:
      index:
        index: test
        type: test
        id: 1
        body: { "text": "test" }
  - do:
      catch: /size should be greater than 0 if the request is limited to some number of documents or -1 if it isn't but it was \[-4\]/
      reindex:
        body:
          source:
            index: test
          dest:
            index: dest
          size: -4

---
"can't set ttl":
  - do:
      index:
        index: test
        type: test
        id: 1
        body: { "text": "test" }
  - do:
      catch: /setting ttl on destination isn't supported. use scripts instead./
      reindex:
        body:
          source:
            index: test
          dest:
            index: dest
            ttl: 3m

---
"can't set timestamp":
  - do:
      index:
        index: test
        type: test
        id: 1
        body: { "text": "test" }
  - do:
      catch: /setting timestamp on destination isn't supported. use scripts instead./
      reindex:
        body:
          source:
            index: test
          dest:
            index: dest
            timestamp: "123"
@ -0,0 +1,72 @@
---
"Can limit copied docs by specifying a query":
  - do:
      index:
        index: test
        type: test
        id: 1
        body: { "text": "test" }
  - do:
      index:
        index: test
        type: test
        id: 2
        body: { "text": "junk" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: test
            query:
              match:
                text: test
          dest:
            index: target

  - do:
      search:
        index: target
  - match: { hits.total: 1 }

---
"Sorting and size combined":
  - do:
      index:
        index: test
        type: test
        id: 1
        body: { "order": 1 }
  - do:
      index:
        index: test
        type: test
        id: 2
        body: { "order": 2 }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          size: 1
          source:
            index: test
            sort: order
          dest:
            index: target

  - do:
      search:
        index: target
  - match: { hits.total: 1 }

  - do:
      search:
        index: target
        q: order:1
  - match: { hits.total: 1 }
@ -0,0 +1,185 @@
# This test relies on setting version: 2, version_type: external on the source
# of the reindex and then manipulates the versioning in the destination.
# ReindexVersioningTests is a more thorough, java based version of these tests.

---
"versioning defaults to overwrite":
  - do:
      index:
        index: src
        type: test
        id: 1
        body: { "company": "cat" }
        version: 2
        version_type: external
  - do:
      index:
        index: src
        type: test
        id: 2
        body: { "company": "cow" }
  - do:
      index:
        index: dest
        type: test
        id: 1
        body: { "company": "dog" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: src
          dest:
            index: dest

  - do:
      search:
        index: dest
        q: company:cat
  - match: { hits.total: 1 }
  - do:
      search:
        index: dest
        q: company:cow
  - match: { hits.total: 1 }

---
"op_type can be set to create":
  - do:
      index:
        index: src
        type: test
        id: 1
        body: { "company": "cat" }
        version: 2
        version_type: external
  - do:
      index:
        index: src
        type: test
        id: 2
        body: { "company": "cow" }
  - do:
      index:
        index: dest
        type: test
        id: 1
        body: { "company": "dog" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          conflicts: proceed
          source:
            index: src
          dest:
            index: dest
            op_type: create

  - do:
      search:
        index: dest
        q: company:dog
  - match: { hits.total: 1 }
  - do:
      search:
        index: dest
        q: company:cow
  - match: { hits.total: 1 }

---
"version_type=external has refresh semantics":
  - do:
      index:
        index: src
        type: test
        id: 1
        body: { "company": "cat" }
        version: 2
        version_type: external
  - do:
      index:
        index: src
        type: test
        id: 2
        body: { "company": "cow" }
  - do:
      index:
        index: dest
        type: test
        id: 1
        body: { "company": "dog" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: src
          dest:
            index: dest
            version_type: external

  - do:
      search:
        index: dest
        q: company:cat
  - match: { hits.total: 1 }
  - do:
      search:
        index: dest
        q: company:cow
  - match: { hits.total: 1 }

---
"version_type=internal has overwrite semantics":
  - do:
      index:
        index: src
        type: test
        id: 1
        body: { "company": "cat" }
  - do:
      index:
        index: src
        type: test
        id: 2
        body: { "company": "cow" }
  - do:
      index:
        index: dest
        type: test
        id: 1
        body: { "company": "dog" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: src
          dest:
            index: dest
            version_type: internal

  - do:
      search:
        index: dest
        q: company:cat
  - match: { hits.total: 1 }
  - do:
      search:
        index: dest
        q: company:cow
  - match: { hits.total: 1 }
@ -0,0 +1,57 @@
---
"Set routing":
  - do:
      index:
        index: src
        type: test
        id: 1
        body: { "company": "cat" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: src
          dest:
            index: dest
            routing: =cat

  - do:
      get:
        index: dest
        type: test
        id: 1
        routing: cat
  - match: { _routing: cat }

---
"Discard routing":
  - do:
      index:
        index: src
        type: test
        id: 1
        body: { "company": "cat" }
        routing: cat
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: src
          dest:
            index: dest
            routing: discard

  - do:
      get:
        index: dest
        type: test
        id: 1
  - is_false: _routing
@ -0,0 +1,50 @@
---
"can override consistency":
  - do:
      indices.create:
        index: dest
        body:
          settings:
            number_of_replicas: 5
  - do:
      cluster.health:
        wait_for_status: yellow
  - do:
      index:
        index: src
        type: test
        id: 1
        body: {"text": "test"}
        consistency: one
  - do:
      indices.refresh: {}

  - do:
      catch: unavailable
      reindex:
        timeout: 1s
        body:
          source:
            index: src
          dest:
            index: dest
  - match:
      failures.0.cause.reason: /Not.enough.active.copies.to.meet.write.consistency.of.\[QUORUM\].\(have.1,.needed.4\)\..Timeout\:.\[1s\],.request:.BulkShardRequest.to.\[dest\].containing.\[1\].requests/

  - do:
      reindex:
        consistency: one
        body:
          source:
            index: src
          dest:
            index: dest
  - match: {failures: []}
  - match: {created: 1}
  - match: {version_conflicts: 0}

  - do:
      get:
        index: dest
        type: test
        id: 1
@ -0,0 +1,187 @@
---
"Basic response":
  - do:
      index:
        index: test
        type: foo
        id: 1
        body: { "text": "test" }
  - do:
      indices.refresh: {}

  - do:
      update-by-query:
        index: test
  - match: {updated: 1}
  - match: {version_conflicts: 0}
  - match: {batches: 1}
  - match: {failures: []}
  - match: {noops: 0}
  - is_true: took
  - is_false: created # This shouldn't be included in the response

---
"Response for version conflict":
  - do:
      indices.create:
        index: test
        body:
          settings:
            index.refresh_interval: -1
  - do:
      index:
        index: test
        type: foo
        id: 1
        body: { "text": "test" }
  - do:
      indices.refresh: {}
  - do: # Creates a new version for update-by-query to miss on scan.
      index:
        index: test
        type: foo
        id: 1
        body: { "text": "test2" }

  - do:
      catch: conflict
      update-by-query:
        index: test
  - match: {updated: 0}
  - match: {version_conflicts: 1}
  - match: {batches: 1}
  - match: {failures.0.index: test}
  - match: {failures.0.type: foo}
  - match: {failures.0.id: "1"}
  - match: {failures.0.status: 409}
  - match: {failures.0.cause.type: version_conflict_engine_exception}
  - match: {failures.0.cause.reason: "[foo][1]: version conflict, current version [2] is different than the one provided [1]"}
  - match: {failures.0.cause.shard: /\d+/}
  - match: {failures.0.cause.index: test}
  - is_true: took

---
"Response for version conflict with conflicts=proceed":
  - do:
      indices.create:
        index: test
        body:
          settings:
            index.refresh_interval: -1
  - do:
      index:
        index: test
        type: foo
        id: 1
        body: { "text": "test" }
  - do:
      indices.refresh: {}
  - do: # Creates a new version for update-by-query to miss on scan.
      index:
        index: test
        type: foo
        id: 1
        body: { "text": "test2" }

  - do:
      update-by-query:
        index: test
        conflicts: proceed
  - match: {updated: 0}
  - match: {version_conflicts: 1}
  - match: {batches: 1}
  - match: {noops: 0}
  - match: {failures: []}
  - is_true: took

---
"Limit by query":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      index:
        index: twitter
        type: tweet
        id: 2
        body: { "user": "junk" }
  - do:
      indices.refresh: {}

  - do:
      update-by-query:
        index: twitter
        body:
          query:
            match:
              user: kimchy
  - match: {updated: 1}
  - match: {version_conflicts: 0}
  - match: {batches: 1}
  - match: {failures: []}
  - is_true: took

---
"Limit by size":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      index:
        index: twitter
        type: tweet
        id: 2
        body: { "user": "kimchy" }
  - do:
      indices.refresh: {}

  - do:
      update-by-query:
        index: twitter
        size: 1
  - match: {updated: 1}
  - match: {version_conflicts: 0}
  - match: {batches: 1}
  - match: {failures: []}
  - is_true: took

---
"Can override scroll_size":
  - do:
      indices.create:
        index: test
        body:
          settings:
            number_of_shards: 1
  - do:
      cluster.health:
        wait_for_status: yellow
  - do:
      index:
        index: test
        type: foo
        body: { "text": "test" }
  - do:
      index:
        index: test
        type: foo
        body: { "text": "test" }
  - do:
      index:
        index: test
        type: foo
        body: { "text": "test" }
  - do:
      indices.refresh: {}

  - do:
      update-by-query:
        index: test
        scroll_size: 1
  - match: {batches: 3}
@ -0,0 +1,41 @@
---
"invalid conflicts fails":
  - do:
      index:
        index: test
        type: test
        id: 1
        body: { "text": "test" }
  - do:
      catch: /conflicts may only be .* but was \[cat\]/
      update-by-query:
        index: test
        conflicts: cat

---
"invalid scroll_size fails":
  - do:
      index:
        index: test
        type: test
        id: 1
        body: { "text": "test" }
  - do:
      catch: /Failed to parse int parameter \[scroll_size\] with value \[cat\]/
      update-by-query:
        index: test
        scroll_size: cat

---
"invalid size fails":
  - do:
      index:
        index: test
        type: test
        id: 1
        body: { "text": "test" }
  - do:
      catch: /size should be greater than 0 if the request is limited to some number of documents or -1 if it isn't but it was \[-4\]/
      update-by-query:
        index: test
        size: -4
@ -0,0 +1,58 @@
---
"Update-by-query picks up new fields":
  - do:
      indices.create:
        index: test
        body:
          mappings:
            place:
              properties:
                name:
                  type: string
  - do:
      cluster.health:
        wait_for_status: yellow
  - do:
      index:
        index: test
        type: place
        id: 1
        refresh: true
        body: { "name": "bob's house" }

  - do:
      indices.put_mapping:
        index: test
        type: place
        body:
          properties:
            name:
              type: string
              fields:
                english:
                  type: string
                  analyzer: english

  - do:
      search:
        index: test
        body:
          query:
            match:
              name.english: bob
  - match: { hits.total: 0 }

  - do:
      update-by-query:
        index: test
  - do:
      indices.refresh: {}

  - do:
      search:
        index: test
        body:
          query:
            match:
              name.english: bob
  - match: { hits.total: 1 }
@ -0,0 +1,23 @@
---
"update-by-query increments the version number":
  - do:
      index:
        index: test
        type: test
        id: 1
        body: {"text": "test"}
  - do:
      indices.refresh: {}

  - do:
      update-by-query:
        index: test
  - match: {updated: 1}
  - match: {version_conflicts: 0}

  - do:
      get:
        index: test
        type: test
        id: 1
  - match: {_version: 2}
@ -0,0 +1,42 @@
---
"can override consistency":
  - do:
      indices.create:
        index: test
        body:
          settings:
            number_of_replicas: 5
  - do:
      cluster.health:
        wait_for_status: yellow
  - do:
      index:
        index: test
        type: test
        id: 1
        body: {"text": "test"}
        consistency: one
  - do:
      indices.refresh: {}

  - do:
      catch: unavailable
      update-by-query:
        index: test
        timeout: 1s
  - match:
      failures.0.cause.reason: /Not.enough.active.copies.to.meet.write.consistency.of.\[QUORUM\].\(have.1,.needed.4\)..Timeout\:.\[1s\],.request:.BulkShardRequest.to.\[test\].containing.\[1\].requests/

  - do:
      update-by-query:
        index: test
        consistency: one
  - match: {failures: []}
  - match: {updated: 1}
  - match: {version_conflicts: 0}

  - do:
      get:
        index: test
        type: test
        id: 1
qa/smoke-test-reindex-with-groovy/build.gradle
@ -0,0 +1,28 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

apply plugin: 'elasticsearch.rest-test'

integTest {
  cluster {
    systemProperty 'es.script.inline', 'on'
    plugin 'reindex', project(':plugins:reindex')
    plugin 'groovy', project(':plugins:lang-groovy')
  }
}
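This qa project boots an integration-test cluster with inline scripting switched on and both the reindex and lang-groovy plugins installed, then runs the YAML tests below against it. Presumably it is invoked like any other elasticsearch.rest-test project; the exact task path is an assumption, not spelled out in the diff:

gradle :qa:smoke-test-reindex-with-groovy:integTest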
@ -0,0 +1,39 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.smoketest;

import com.carrotsearch.randomizedtesting.annotations.Name;
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.elasticsearch.test.rest.RestTestCandidate;
import org.elasticsearch.test.rest.parser.RestTestParseException;

import java.io.IOException;

public class SmokeTestReindexWithGroovyIT extends ESRestTestCase {
    public SmokeTestReindexWithGroovyIT(@Name("yaml") RestTestCandidate testCandidate) {
        super(testCandidate);
    }

    @ParametersFactory
    public static Iterable<Object[]> parameters() throws IOException, RestTestParseException {
        return ESRestTestCase.createParameters(0, 1);
    }
}
@ -0,0 +1,397 @@
---
"Modify a document":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: twitter
          dest:
            index: new_twitter
          script:
            inline: ctx._source.user = "other" + ctx._source.user
  - match: {created: 1}
  - match: {noops: 0}

  - do:
      search:
        index: new_twitter
        body:
          query:
            match:
              user: otherkimchy
  - match: { hits.total: 1 }
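This first Groovy test shows the reindex-with-script round trip: copy twitter into new_twitter, rewriting _source.user along the way. An equivalent raw request might look like the following; the _reindex endpoint path is assumed from this commit's REST spec:

curl -XPOST 'localhost:9200/_reindex?refresh=true' -d '{
  "source": { "index": "twitter" },
  "dest":   { "index": "new_twitter" },
  "script": { "inline": "ctx._source.user = \"other\" + ctx._source.user" }
}'
# Expected response counters: "created": 1, "noops": 0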
---
"Modify a document based on id":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      index:
        index: twitter
        type: tweet
        id: 2
        body: { "user": "blort" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: twitter
          dest:
            index: new_twitter
          script:
            inline: if (ctx._id == "1") {ctx._source.user = "other" + ctx._source.user}
  - match: {created: 2}
  - match: {noops: 0}

  - do:
      search:
        index: new_twitter
        body:
          query:
            match:
              user: otherkimchy
  - match: { hits.total: 1 }

  - do:
      search:
        index: new_twitter
        body:
          query:
            match:
              user: blort
  - match: { hits.total: 1 }

---
"Add new parent":
  - do:
      indices.create:
        index: new_twitter
        body:
          mappings:
            tweet:
              _parent: { type: "user" }
  - do:
      cluster.health:
        wait_for_status: yellow

  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      index:
        index: new_twitter
        type: user
        id: kimchy
        body: { "name": "kimchy" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: twitter
          dest:
            index: new_twitter
          script:
            inline: ctx._parent = ctx._source.user
  - match: {created: 1}
  - match: {noops: 0}

  - do:
      search:
        index: new_twitter
        body:
          query:
            has_parent:
              parent_type: user
              query:
                match:
                  name: kimchy
  - match: { hits.total: 1 }
  - match: { hits.hits.0._source.user: kimchy }
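Note that the script rewrites document metadata, not just _source: setting ctx._parent attaches each copied tweet to its user, which only works because new_twitter declares the _parent type in its mapping before the copy runs. Roughly, under the same endpoint assumption as above:

curl -XPOST 'localhost:9200/_reindex?refresh=true' -d '{
  "source": { "index": "twitter" },
  "dest":   { "index": "new_twitter" },
  "script": { "inline": "ctx._parent = ctx._source.user" }
}'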
---
"Add routing":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      index:
        index: twitter
        type: tweet
        id: 2
        body: { "user": "foo" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: twitter
          dest:
            index: new_twitter
          script:
            inline: ctx._routing = ctx._source.user
  - match: {created: 2}
  - match: {noops: 0}

  - do:
      get:
        index: new_twitter
        type: tweet
        id: 1
        routing: kimchy
  - match: { _routing: kimchy }

  - do:
      get:
        index: new_twitter
        type: tweet
        id: 2
        routing: foo
  - match: { _routing: foo }

---
"Add routing and parent":
  - do:
      indices.create:
        index: new_twitter
        body:
          mappings:
            tweet:
              _parent: { type: "user" }
  - do:
      cluster.health:
        wait_for_status: yellow

  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      index:
        index: new_twitter
        type: user
        id: kimchy
        body: { "name": "kimchy" }
        routing: cat
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: twitter
          dest:
            index: new_twitter
          script:
            inline: ctx._parent = ctx._source.user; ctx._routing = "cat"
  - match: {created: 1}
  - match: {noops: 0}

  - do:
      search:
        index: new_twitter
        routing: cat
        body:
          query:
            has_parent:
              parent_type: user
              query:
                match:
                  name: kimchy
  - match: { hits.total: 1 }
  - match: { hits.hits.0._source.user: kimchy }
  - match: { hits.hits.0._routing: cat }
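Routing works the same way: the script sets ctx._routing per document, and subsequent reads must supply the matching routing value or they may hit the wrong shard. A sketch under the same endpoint assumption:

curl -XPOST 'localhost:9200/_reindex?refresh=true' -d '{
  "source": { "index": "twitter" },
  "dest":   { "index": "new_twitter" },
  "script": { "inline": "ctx._routing = ctx._source.user" }
}'
# Reads then need the routing value the script assigned:
curl -XGET 'localhost:9200/new_twitter/tweet/1?routing=kimchy'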
---
"Noop one doc":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      index:
        index: twitter
        type: tweet
        id: 2
        body: { "user": "foo" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: twitter
          dest:
            index: new_twitter
          script:
            inline: if (ctx._source.user == "kimchy") {ctx._source.user = "not" + ctx._source.user} else {ctx.op = "noop"}
  - match: {created: 1}
  - match: {noops: 1}

  - do:
      search:
        index: new_twitter
        body:
          query:
            match:
              user: notkimchy
  - match: { hits.total: 1 }

  - do:
      search:
        index: twitter
        body:
          query:
            match:
              user: notfoo
  - match: { hits.total: 0 }

---
"Noop all docs":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      index:
        index: twitter
        type: tweet
        id: 2
        body: { "user": "foo" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        body:
          source:
            index: twitter
          dest:
            index: new_twitter
          script:
            inline: ctx.op = "noop"
  - match: {updated: 0}
  - match: {noops: 2}
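Setting ctx.op = "noop" drops a document from the bulk request entirely: it is counted in noops rather than created or updated, and when every document is nooped the destination index receives no writes at all. Sketched, endpoint assumed as before:

curl -XPOST 'localhost:9200/_reindex' -d '{
  "source": { "index": "twitter" },
  "dest":   { "index": "new_twitter" },
  "script": { "inline": "ctx.op = \"noop\"" }
}'
# Expected counters: "updated": 0, "noops": 2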
---
"Set version to null to force an update":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        version: 1
        version_type: external
        body: { "user": "kimchy" }
  - do:
      index:
        index: new_twitter
        type: tweet
        id: 1
        version: 1
        version_type: external
        body: { "user": "kimchy" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: twitter
          dest:
            index: new_twitter
            version_type: external
          script:
            inline: ctx._source.user = "other" + ctx._source.user; ctx._version = null
  - match: {updated: 1}
  - match: {noops: 0}

  - do:
      search:
        index: new_twitter
        body:
          query:
            match:
              user: otherkimchy
  - match: { hits.total: 1 }
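With version_type: external on the destination, the copy would normally be skipped as a version conflict because both documents already sit at version 1; nulling ctx._version in the script tells reindex to write without a version check, forcing the update through. Roughly:

curl -XPOST 'localhost:9200/_reindex?refresh=true' -d '{
  "source": { "index": "twitter" },
  "dest":   { "index": "new_twitter", "version_type": "external" },
  "script": { "inline": "ctx._source.user = \"other\" + ctx._source.user; ctx._version = null" }
}'
# Expected counters: "updated": 1, "noops": 0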
---
"Set id to null to get an automatic id":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      index:
        index: new_twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        refresh: true
        body:
          source:
            index: twitter
          dest:
            index: new_twitter
          script:
            inline: ctx._source.user = "other" + ctx._source.user; ctx._id = null
  - match: {created: 1}
  - match: {noops: 0}

  - do:
      search:
        index: new_twitter
        body:
          query:
            match:
              user: otherkimchy
  - match: { hits.total: 1 }
@ -0,0 +1,140 @@
---
"Update a document using update-by-query":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      indices.refresh: {}

  - do:
      update-by-query:
        index: twitter
        refresh: true
        body:
          script:
            inline: ctx._source.user = "not" + ctx._source.user
  - match: {updated: 1}
  - match: {noops: 0}

  - do:
      search:
        index: twitter
        body:
          query:
            match:
              user: notkimchy
  - match: { hits.total: 1 }
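Update-by-query takes the same kind of script but writes each document back to the index it came from, so there is no source/dest pair in the body. A hedged equivalent, with the _update_by_query path assumed as before:

curl -XPOST 'localhost:9200/twitter/_update_by_query?refresh=true' -d '{
  "script": { "inline": "ctx._source.user = \"not\" + ctx._source.user" }
}'
# Expected counters: "updated": 1, "noops": 0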
---
"Noop one doc":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      index:
        index: twitter
        type: tweet
        id: 2
        body: { "user": "foo" }
  - do:
      indices.refresh: {}

  - do:
      update-by-query:
        refresh: true
        index: twitter
        body:
          script:
            inline: if (ctx._source.user == "kimchy") {ctx._source.user = "not" + ctx._source.user} else {ctx.op = "noop"}
  - match: {updated: 1}
  - match: {noops: 1}

  - do:
      search:
        index: twitter
        body:
          query:
            match:
              user: notkimchy
  - match: { hits.total: 1 }

  - do:
      search:
        index: twitter
        body:
          query:
            match:
              user: notfoo
  - match: { hits.total: 0 }
---
"Noop all docs":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      index:
        index: twitter
        type: tweet
        id: 2
        body: { "user": "foo" }
  - do:
      indices.refresh: {}

  - do:
      update-by-query:
        refresh: true
        index: twitter
        body:
          script:
            inline: ctx.op = "noop"
  - match: {updated: 0}
  - match: {noops: 2}
  - match: {batches: 1}

---
"Setting bogus ctx is an error":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      indices.refresh: {}

  - do:
      catch: /Invalid fields added to ctx \[junk\]/
      update-by-query:
        index: twitter
        body:
          script:
            inline: ctx.junk = "stuff"

---
"Can't change _id":
  - do:
      index:
        index: twitter
        type: tweet
        id: 1
        body: { "user": "kimchy" }
  - do:
      indices.refresh: {}

  - do:
      catch: /Modifying \[_id\] not allowed/
      update-by-query:
        index: twitter
        body:
          script:
            inline: ctx._id = "stuff"
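These last two tests pin down the script sandbox for update-by-query: scripts may modify _source and ctx.op, but unknown ctx keys are rejected and metadata such as _id is immutable in place. For example, under the same endpoint assumption:

curl -XPOST 'localhost:9200/twitter/_update_by_query' -d '{
  "script": { "inline": "ctx._id = \"stuff\"" }
}'
# => 400-class failure: Modifying [_id] not allowed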
@ -251,6 +251,10 @@ fi
    install_and_check_plugin mapper size
}

@test "[$GROUP] install reindex plugin" {
    install_and_check_plugin - reindex
}

@test "[$GROUP] install repository-azure plugin" {
    install_and_check_plugin repository azure azure-storage-*.jar
}
@ -359,6 +363,10 @@ fi
    remove_plugin mapper-size
}

@test "[$GROUP] remove reindex plugin" {
    remove_plugin reindex
}

@test "[$GROUP] remove repository-azure plugin" {
    remove_plugin repository-azure
}
@ -25,6 +25,7 @@ List projects = [
  'plugins:mapper-attachments',
  'plugins:mapper-murmur3',
  'plugins:mapper-size',
  'plugins:reindex',
  'plugins:repository-azure',
  'plugins:repository-s3',
  'plugins:jvm-example',
@ -33,6 +34,7 @@ List projects = [
  'qa:evil-tests',
  'qa:smoke-test-client',
  'qa:smoke-test-multinode',
  'qa:smoke-test-reindex-with-groovy',
  'qa:smoke-test-plugins',
  'qa:vagrant',
]
@ -80,4 +82,3 @@ if (xplugins.exists()) {
    addSubProjects(':x-plugins', extraPluginDir)
  }
}
@ -17,7 +17,7 @@
 * under the License.
 */

-package org.elasticsearch.rest;
+package org.elasticsearch.test.client;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.Action;
@ -140,6 +140,7 @@ public class DoSection implements ExecutableSection {
        catches.put("conflict", tuple("409", equalTo(409)));
        catches.put("forbidden", tuple("403", equalTo(403)));
        catches.put("request_timeout", tuple("408", equalTo(408)));
        catches.put("unavailable", tuple("503", equalTo(503)));
        catches.put("request", tuple("4xx|5xx", allOf(greaterThanOrEqualTo(400), not(equalTo(404)), not(equalTo(408)), not(equalTo(409)), not(equalTo(403)))));
    }
}