diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml
index 268ee077c34..920fda8d177 100644
--- a/buildSrc/src/main/resources/checkstyle_suppressions.xml
+++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml
@@ -746,7 +746,6 @@
-
diff --git a/core/src/main/java/org/elasticsearch/action/ActionListener.java b/core/src/main/java/org/elasticsearch/action/ActionListener.java
index 0b3a69bb811..8447d6cef08 100644
--- a/core/src/main/java/org/elasticsearch/action/ActionListener.java
+++ b/core/src/main/java/org/elasticsearch/action/ActionListener.java
@@ -21,18 +21,16 @@ package org.elasticsearch.action;
/**
* A listener for action responses or failures.
- *
- *
*/
public interface ActionListener<Response> {
-
/**
- * A response handler.
+ * Handle action response. This response may constitute a failure or a
+ * success but it is up to the listener to make that decision.
*/
void onResponse(Response response);
/**
- * A failure handler.
+ * A failure caused by an exception at some phase of the task.
*/
void onFailure(Throwable e);
}
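For context, callers typically supply the listener anonymously. A minimal sketch (IndexResponse is just an illustrative response type; `client` and `logger` are assumed to be in scope):

    client.index(request, new ActionListener<IndexResponse>() {
        @Override
        public void onResponse(IndexResponse response) {
            // Per the new javadoc: the listener decides whether this counts as success.
            logger.info("indexed [{}]", response.getId());
        }

        @Override
        public void onFailure(Throwable e) {
            logger.error("indexing failed", e);
        }
    });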
diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java
index b8d09583f69..5d01c48ce8b 100644
--- a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java
@@ -28,7 +28,9 @@ import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.StatusToXContent;
+import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.rest.RestStatus;
@@ -76,7 +78,15 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
/**
* Represents a failure.
*/
- public static class Failure {
+ public static class Failure implements Writeable<Failure>, ToXContent {
+ static final String INDEX_FIELD = "index";
+ static final String TYPE_FIELD = "type";
+ static final String ID_FIELD = "id";
+ static final String CAUSE_FIELD = "cause";
+ static final String STATUS_FIELD = "status";
+
+ public static final Failure PROTOTYPE = new Failure(null, null, null, null);
+
private final String index;
private final String type;
private final String id;
@@ -126,9 +136,39 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
return this.status;
}
+ /**
+ * The actual cause of the failure.
+ */
public Throwable getCause() {
return cause;
}
+
+ @Override
+ public Failure readFrom(StreamInput in) throws IOException {
+ return new Failure(in.readString(), in.readString(), in.readOptionalString(), in.readThrowable());
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(getIndex());
+ out.writeString(getType());
+ out.writeOptionalString(getId());
+ out.writeThrowable(getCause());
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.field(INDEX_FIELD, index);
+ builder.field(TYPE_FIELD, type);
+ if (id != null) {
+ builder.field(ID_FIELD, id);
+ }
+ builder.startObject(CAUSE_FIELD);
+ ElasticsearchException.toXContent(builder, params, cause);
+ builder.endObject();
+ builder.field(STATUS_FIELD, status.getStatus());
+ return builder;
+ }
}
private int id;
@@ -265,11 +305,7 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
}
if (in.readBoolean()) {
- String fIndex = in.readString();
- String fType = in.readString();
- String fId = in.readOptionalString();
- Throwable throwable = in.readThrowable();
- failure = new Failure(fIndex, fType, fId, throwable);
+ failure = Failure.PROTOTYPE.readFrom(in);
}
}
@@ -294,10 +330,7 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
- out.writeString(failure.getIndex());
- out.writeString(failure.getType());
- out.writeOptionalString(failure.getId());
- out.writeThrowable(failure.getCause());
+ failure.writeTo(out);
}
}
}
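The PROTOTYPE constant enables the readFrom-style deserialization used above without a dedicated constructor path at each call site. A rough round-trip sketch (BytesStreamOutput is an existing helper in common.io.stream, not part of this patch):

    // Serialize a failure and read it back through the prototype.
    BulkItemResponse.Failure failure =
            new BulkItemResponse.Failure("idx", "type", "1", new RuntimeException("boom"));
    BytesStreamOutput out = new BytesStreamOutput();
    failure.writeTo(out);
    StreamInput in = out.bytes().streamInput();
    BulkItemResponse.Failure copy = BulkItemResponse.Failure.PROTOTYPE.readFrom(in);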
diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java
index 275e2819cf6..874789e8d61 100644
--- a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java
@@ -94,6 +94,12 @@ public class BulkShardRequest extends ReplicationRequest<BulkShardRequest> {
@Override
public String toString() {
- return "shard bulk {" + super.toString() + "}";
+ // This is included in error messages so we'll try to make it somewhat user friendly.
+ StringBuilder b = new StringBuilder("BulkShardRequest to [");
+ b.append(index).append("] containing [").append(items.length).append("] requests");
+ if (refresh) {
+ b.append(" and a refresh");
+ }
+ return b.toString();
}
}
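For reference, the message now renders along these lines (sketch; the index name and request count depend on the actual request):

    BulkShardRequest to [my_index] containing [3] requests and a refresh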
diff --git a/core/src/main/java/org/elasticsearch/action/bulk/Retry.java b/core/src/main/java/org/elasticsearch/action/bulk/Retry.java
index 72e0da71921..acaa784ac87 100644
--- a/core/src/main/java/org/elasticsearch/action/bulk/Retry.java
+++ b/core/src/main/java/org/elasticsearch/action/bulk/Retry.java
@@ -38,7 +38,7 @@ import java.util.function.Predicate;
/**
* Encapsulates synchronous and asynchronous retry logic.
*/
-class Retry {
+public class Retry {
private final Class<? extends Throwable> retryOnThrowable;
private BackoffPolicy backoffPolicy;
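Making Retry public lets code outside the bulk package (reindex is the motivating caller) reuse the backoff machinery. A usage sketch assuming the fluent style BulkProcessor already uses; verify the exact method names against the class:

    // Retry bulk requests that were rejected by a full thread pool.
    Retry.on(EsRejectedExecutionException.class)
         .policy(BackoffPolicy.exponentialBackoff(TimeValue.timeValueMillis(50), 8))
         .withAsyncBackoff(client, bulkRequest, listener);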
diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java
index 33bf17f0653..94614fb01d9 100644
--- a/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java
@@ -223,6 +223,13 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements DocumentRequest<IndexRequest>
return validationException;
}
+ /**
+ * The content type that will be used when generating a document from user provided objects like Maps.
+ */
+ public XContentType getContentType() {
+ return contentType;
+ }
+
/**
* Sets the content type that will be used when generating a document from user provided objects (like Map).
*/
@@ -294,6 +301,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements DocumentRequest<IndexRequest>
return this;
}
+ @Override
public String parent() {
return this.parent;
}
@@ -645,7 +653,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements DocumentRequest<IndexRequest>
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
- type = in.readString();
+ type = in.readOptionalString();
id = in.readOptionalString();
routing = in.readOptionalString();
parent = in.readOptionalString();
@@ -663,7 +671,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements DocumentRequest<IndexRequest>
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
- out.writeString(type);
+ out.writeOptionalString(type);
out.writeOptionalString(id);
out.writeOptionalString(routing);
out.writeOptionalString(parent);
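Switching type to the optional variants is a wire-format change: both ends must agree on the presence marker. A sketch of the required symmetry (readOptionalString/writeOptionalString are existing stream helpers):

    out.writeOptionalString(type);           // writes a presence marker, then the value if non-null
    String type = in.readOptionalString();   // returns null when the marker said "absent"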
diff --git a/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java
index d4ae139ee0c..830a54778e1 100644
--- a/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java
+++ b/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java
@@ -39,7 +39,7 @@ import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.SearchShardTarget;
-import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
import org.elasticsearch.search.internal.InternalSearchResponse;
@@ -58,7 +58,7 @@ import static org.elasticsearch.action.search.TransportSearchHelper.internalSearchRequest;
abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult> extends AbstractAsyncAction {
protected final ESLogger logger;
- protected final SearchServiceTransportAction searchService;
+ protected final SearchTransportService searchTransportService;
private final IndexNameExpressionResolver indexNameExpressionResolver;
protected final SearchPhaseController searchPhaseController;
protected final ThreadPool threadPool;
@@ -76,12 +76,12 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult> extends AbstractAsyncAction
private final Object shardFailuresMutex = new Object();
protected volatile ScoreDoc[] sortedShardList;
- protected AbstractSearchAsyncAction(ESLogger logger, SearchServiceTransportAction searchService, ClusterService clusterService,
+ protected AbstractSearchAsyncAction(ESLogger logger, SearchTransportService searchTransportService, ClusterService clusterService,
IndexNameExpressionResolver indexNameExpressionResolver,
SearchPhaseController searchPhaseController, ThreadPool threadPool, SearchRequest request,
ActionListener<SearchResponse> listener) {
this.logger = logger;
- this.searchService = searchService;
+ this.searchTransportService = searchTransportService;
this.indexNameExpressionResolver = indexNameExpressionResolver;
this.searchPhaseController = searchPhaseController;
this.threadPool = threadPool;
@@ -332,7 +332,7 @@ abstract class AbstractSearchAsyncAction
protected void sendReleaseSearchContext(long contextId, DiscoveryNode node) {
if (node != null) {
- searchService.sendFreeContext(node, contextId, request);
+ searchTransportService.sendFreeContext(node, contextId, request);
}
}
diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java
index b04b18f735b..56d0fedd40c 100644
--- a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java
+++ b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java
@@ -26,7 +26,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
-import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.dfs.AggregatedDfs;
import org.elasticsearch.search.dfs.DfsSearchResult;
@@ -43,11 +43,12 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<DfsSearchResult> {
private final AtomicArray<QueryFetchSearchResult> queryFetchResults;
- SearchDfsQueryAndFetchAsyncAction(ESLogger logger, SearchServiceTransportAction searchService,
+ SearchDfsQueryAndFetchAsyncAction(ESLogger logger, SearchTransportService searchTransportService,
ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
SearchPhaseController searchPhaseController, ThreadPool threadPool,
SearchRequest request, ActionListener<SearchResponse> listener) {
- super(logger, searchService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool, request, listener);
+ super(logger, searchTransportService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool,
+ request, listener);
queryFetchResults = new AtomicArray<>(firstResults.length());
}
@@ -59,7 +60,7 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<DfsSearchResult>
ActionListener<DfsSearchResult> listener) {
- searchService.sendExecuteDfs(node, request, listener);
+ searchTransportService.sendExecuteDfs(node, request, listener);
}
@Override
@@ -77,7 +78,7 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<DfsSearchResult>
- searchService.sendExecuteFetch(node, querySearchRequest, new ActionListener<QueryFetchSearchResult>() {
+ searchTransportService.sendExecuteFetch(node, querySearchRequest, new ActionListener<QueryFetchSearchResult>() {
@Override
public void onResponse(QueryFetchSearchResult result) {
result.shardTarget(dfsResult.shardTarget());
diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java
index 76337334caa..f2dcefa7554 100644
--- a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java
+++ b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java
@@ -29,7 +29,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchShardTarget;
-import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.dfs.AggregatedDfs;
import org.elasticsearch.search.dfs.DfsSearchResult;
@@ -50,11 +50,12 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSearchResult> {
final AtomicArray<FetchSearchResult> fetchResults;
final AtomicArray<IntArrayList> docIdsToLoad;
- SearchDfsQueryThenFetchAsyncAction(ESLogger logger, SearchServiceTransportAction searchService,
+ SearchDfsQueryThenFetchAsyncAction(ESLogger logger, SearchTransportService searchTransportService,
ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
SearchPhaseController searchPhaseController, ThreadPool threadPool,
SearchRequest request, ActionListener<SearchResponse> listener) {
- super(logger, searchService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool, request, listener);
+ super(logger, searchTransportService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool,
+ request, listener);
queryResults = new AtomicArray<>(firstResults.length());
fetchResults = new AtomicArray<>(firstResults.length());
docIdsToLoad = new AtomicArray<>(firstResults.length());
@@ -68,7 +69,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSearchResult>
ActionListener<DfsSearchResult> listener) {
- searchService.sendExecuteDfs(node, request, listener);
+ searchTransportService.sendExecuteDfs(node, request, listener);
}
@Override
@@ -85,7 +86,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSearchResult>
- searchService.sendExecuteQuery(node, querySearchRequest, new ActionListener<QuerySearchResult>() {
+ searchTransportService.sendExecuteQuery(node, querySearchRequest, new ActionListener<QuerySearchResult>() {
@Override
public void onResponse(QuerySearchResult result) {
result.shardTarget(dfsResult.shardTarget());
@@ -157,7 +158,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSearchResult>
- searchService.sendExecuteFetch(node, fetchSearchRequest, new ActionListener<FetchSearchResult>() {
+ searchTransportService.sendExecuteFetch(node, fetchSearchRequest, new ActionListener<FetchSearchResult>() {
@Override
public void onResponse(FetchSearchResult result) {
result.shardTarget(shardTarget);
diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java
index 5187e77f0e7..dcbf9b5091f 100644
--- a/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java
+++ b/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java
@@ -25,7 +25,7 @@ import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.logging.ESLogger;
-import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
import org.elasticsearch.search.internal.InternalSearchResponse;
@@ -36,11 +36,12 @@ import java.io.IOException;
class SearchQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<QueryFetchSearchResult> {
- SearchQueryAndFetchAsyncAction(ESLogger logger, SearchServiceTransportAction searchService,
+ SearchQueryAndFetchAsyncAction(ESLogger logger, SearchTransportService searchTransportService,
ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
SearchPhaseController searchPhaseController, ThreadPool threadPool,
SearchRequest request, ActionListener<SearchResponse> listener) {
- super(logger, searchService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool, request, listener);
+ super(logger, searchTransportService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool,
+ request, listener);
}
@Override
@@ -51,7 +52,7 @@ class SearchQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<QueryFetchSearchResult>
ActionListener<QueryFetchSearchResult> listener) {
- searchService.sendExecuteFetch(node, request, listener);
+ searchTransportService.sendExecuteFetch(node, request, listener);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java
index 84f93590f23..e15b9da8acb 100644
--- a/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java
+++ b/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java
@@ -29,7 +29,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchShardTarget;
-import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.FetchSearchResult;
import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
@@ -46,7 +46,7 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<QuerySearchResultProvider> {
final AtomicArray<FetchSearchResult> fetchResults;
final AtomicArray<IntArrayList> docIdsToLoad;
- SearchQueryThenFetchAsyncAction(ESLogger logger, SearchServiceTransportAction searchService,
+ SearchQueryThenFetchAsyncAction(ESLogger logger, SearchTransportService searchTransportService,
ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
SearchPhaseController searchPhaseController, ThreadPool threadPool,
SearchRequest request, ActionListener listener) {
@@ -63,7 +63,7 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<QuerySearchResultProvider>
ActionListener<QuerySearchResultProvider> listener) {
- searchService.sendExecuteQuery(node, request, listener);
+ searchTransportService.sendExecuteQuery(node, request, listener);
}
@Override
@@ -91,7 +91,7 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<QuerySearchResultProvider>
- searchService.sendExecuteFetch(node, fetchSearchRequest, new ActionListener<FetchSearchResult>() {
+ searchTransportService.sendExecuteFetch(node, fetchSearchRequest, new ActionListener<FetchSearchResult>() {
@Override
public void onResponse(FetchSearchResult result) {
result.shardTarget(shardTarget);
diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java
index e8fe59cc447..b5b95dc5cbe 100644
--- a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java
+++ b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java
@@ -26,7 +26,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
-import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
import org.elasticsearch.search.fetch.ScrollQueryFetchSearchResult;
@@ -42,7 +42,7 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {
private final ESLogger logger;
private final SearchPhaseController searchPhaseController;
- private final SearchServiceTransportAction searchService;
+ private final SearchTransportService searchTransportService;
private final SearchScrollRequest request;
private final ActionListener<SearchResponse> listener;
private final ParsedScrollId scrollId;
@@ -53,11 +53,11 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {
private final AtomicInteger counter;
SearchScrollQueryAndFetchAsyncAction(ESLogger logger, ClusterService clusterService,
- SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController,
+ SearchTransportService searchTransportService, SearchPhaseController searchPhaseController,
SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
this.logger = logger;
this.searchPhaseController = searchPhaseController;
- this.searchService = searchService;
+ this.searchTransportService = searchTransportService;
this.request = request;
this.listener = listener;
this.scrollId = scrollId;
@@ -128,7 +128,7 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {
void executePhase(final int shardIndex, DiscoveryNode node, final long searchId) {
InternalScrollSearchRequest internalRequest = internalScrollSearchRequest(searchId, request);
- searchService.sendExecuteFetch(node, internalRequest, new ActionListener<ScrollQueryFetchSearchResult>() {
+ searchTransportService.sendExecuteFetch(node, internalRequest, new ActionListener<ScrollQueryFetchSearchResult>() {
@Override
public void onResponse(ScrollQueryFetchSearchResult result) {
queryFetchResults.set(shardIndex, result.result());
diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java
index 0efff74524d..864f17eee2c 100644
--- a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java
+++ b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java
@@ -27,7 +27,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
-import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.FetchSearchResult;
import org.elasticsearch.search.fetch.ShardFetchRequest;
@@ -44,7 +44,7 @@ import static org.elasticsearch.action.search.TransportSearchHelper.internalScrollSearchRequest;
class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
private final ESLogger logger;
- private final SearchServiceTransportAction searchService;
+ private final SearchTransportService searchTransportService;
private final SearchPhaseController searchPhaseController;
private final SearchScrollRequest request;
private final ActionListener<SearchResponse> listener;
@@ -57,10 +57,10 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
private final AtomicInteger successfulOps;
SearchScrollQueryThenFetchAsyncAction(ESLogger logger, ClusterService clusterService,
- SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController,
+ SearchTransportService searchTransportService, SearchPhaseController searchPhaseController,
SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
this.logger = logger;
- this.searchService = searchService;
+ this.searchTransportService = searchTransportService;
this.searchPhaseController = searchPhaseController;
this.request = request;
this.listener = listener;
@@ -124,7 +124,7 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
private void executeQueryPhase(final int shardIndex, final AtomicInteger counter, DiscoveryNode node, final long searchId) {
InternalScrollSearchRequest internalRequest = internalScrollSearchRequest(searchId, request);
- searchService.sendExecuteQuery(node, internalRequest, new ActionListener<ScrollQuerySearchResult>() {
+ searchTransportService.sendExecuteQuery(node, internalRequest, new ActionListener<ScrollQuerySearchResult>() {
@Override
public void onResponse(ScrollQuerySearchResult result) {
queryResults.set(shardIndex, result.queryResult());
@@ -182,7 +182,7 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
ScoreDoc lastEmittedDoc = lastEmittedDocPerShard[entry.index];
ShardFetchRequest shardFetchRequest = new ShardFetchRequest(querySearchResult.id(), docIds, lastEmittedDoc);
DiscoveryNode node = nodes.get(querySearchResult.shardTarget().nodeId());
- searchService.sendExecuteFetchScroll(node, shardFetchRequest, new ActionListener<FetchSearchResult>() {
+ searchTransportService.sendExecuteFetchScroll(node, shardFetchRequest, new ActionListener<FetchSearchResult>() {
@Override
public void onResponse(FetchSearchResult result) {
result.shardTarget(querySearchResult.shardTarget());
diff --git a/core/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java b/core/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java
index a43f9302f3a..95f0796ba4f 100644
--- a/core/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java
+++ b/core/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java
@@ -30,7 +30,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.CountDown;
-import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportService;
@@ -47,15 +47,15 @@ import static org.elasticsearch.action.search.TransportSearchHelper.parseScrollId;
public class TransportClearScrollAction extends HandledTransportAction<ClearScrollRequest, ClearScrollResponse> {
private final ClusterService clusterService;
- private final SearchServiceTransportAction searchServiceTransportAction;
+ private final SearchTransportService searchTransportService;
@Inject
public TransportClearScrollAction(Settings settings, TransportService transportService, ThreadPool threadPool,
- ClusterService clusterService, SearchServiceTransportAction searchServiceTransportAction,
+ ClusterService clusterService, SearchTransportService searchTransportService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, ClearScrollAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, ClearScrollRequest::new);
this.clusterService = clusterService;
- this.searchServiceTransportAction = searchServiceTransportAction;
+ this.searchTransportService = searchTransportService;
}
@Override
@@ -64,10 +64,8 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScrollRequest, ClearScrollResponse>
final List<ScrollIdForNode[]> contexts = new ArrayList<>();
final ActionListener<ClearScrollResponse> listener;
final AtomicReference<Throwable> expHolder;
@@ -85,8 +83,6 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScrollRequest, ClearScrollResponse>
this.expHolder = new AtomicReference<>();
this.expectedOps = new CountDown(expectedOps);
@@ -100,7 +96,7 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScrollRequest, ClearScrollResponse>
- searchServiceTransportAction.sendClearAllScrollContexts(node, new ActionListener<TransportResponse>() {
+ searchTransportService.sendClearAllScrollContexts(node, new ActionListener<TransportResponse>() {
@Override
public void onResponse(TransportResponse response) {
onFreedContext(true);
@@ -121,9 +117,9 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScrollRequest, ClearScrollResponse>
- searchServiceTransportAction.sendFreeContext(node, target.getScrollId(), new ActionListener<SearchServiceTransportAction.SearchFreeContextResponse>() {
+ searchTransportService.sendFreeContext(node, target.getScrollId(), new ActionListener<SearchTransportService.SearchFreeContextResponse>() {
@Override
- public void onResponse(SearchServiceTransportAction.SearchFreeContextResponse freed) {
+ public void onResponse(SearchTransportService.SearchFreeContextResponse freed) {
onFreedContext(freed.isFreed());
}
diff --git a/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java
index 8e08350b694..e87fa2a345a 100644
--- a/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java
+++ b/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java
@@ -29,7 +29,7 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.indices.IndexClosedException;
-import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@@ -45,17 +45,17 @@ import static org.elasticsearch.action.search.SearchType.QUERY_AND_FETCH;
public class TransportSearchAction extends HandledTransportAction<SearchRequest, SearchResponse> {
private final ClusterService clusterService;
- private final SearchServiceTransportAction searchService;
+ private final SearchTransportService searchTransportService;
private final SearchPhaseController searchPhaseController;
@Inject
public TransportSearchAction(Settings settings, ThreadPool threadPool, SearchPhaseController searchPhaseController,
- TransportService transportService, SearchServiceTransportAction searchService,
+ TransportService transportService, SearchTransportService searchTransportService,
ClusterService clusterService, ActionFilters actionFilters, IndexNameExpressionResolver
indexNameExpressionResolver) {
super(settings, SearchAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, SearchRequest::new);
this.searchPhaseController = searchPhaseController;
- this.searchService = searchService;
+ this.searchTransportService = searchTransportService;
this.clusterService = clusterService;
}
@@ -81,19 +81,19 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest, SearchResponse>
diff --git a/core/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java b/core/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java
--- a/core/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java
+++ b/core/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java
public class TransportSearchScrollAction extends HandledTransportAction<SearchScrollRequest, SearchResponse> {
private final ClusterService clusterService;
- private final SearchServiceTransportAction searchService;
+ private final SearchTransportService searchTransportService;
private final SearchPhaseController searchPhaseController;
@Inject
public TransportSearchScrollAction(Settings settings, ThreadPool threadPool, TransportService transportService,
- ClusterService clusterService, SearchServiceTransportAction searchService,
+ ClusterService clusterService, SearchTransportService searchTransportService,
SearchPhaseController searchPhaseController,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, SearchScrollAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
SearchScrollRequest::new);
this.clusterService = clusterService;
- this.searchService = searchService;
+ this.searchTransportService = searchTransportService;
this.searchPhaseController = searchPhaseController;
}
@@ -63,11 +63,11 @@ public class TransportSearchScrollAction extends HandledTransportAction<SearchScrollRequest, SearchResponse>
diff --git a/core/src/main/java/org/elasticsearch/action/support/TransportAction.java b/core/src/main/java/org/elasticsearch/action/support/TransportAction.java
--- a/core/src/main/java/org/elasticsearch/action/support/TransportAction.java
+++ b/core/src/main/java/org/elasticsearch/action/support/TransportAction.java
* This is a typical behavior.
*/
public final Task execute(Request request, ActionListener<Response> listener) {
+ /*
+ * While this version of execute could delegate to the TaskListener
+ * version of execute that'd add yet another layer of wrapping on the
+ * listener and prevent us from using the listener bare if there isn't a
+ * task. That just seems like too many objects. Thus the two versions of
+ * this method.
+ */
Task task = taskManager.register("transport", actionName, request);
if (task == null) {
execute(null, request, listener);
@@ -93,11 +101,32 @@ public abstract class TransportAction<Request extends ActionRequest<Request>, Response extends ActionResponse>
return task;
}
+ public final Task execute(Request request, TaskListener<Response> listener) {
+ Task task = taskManager.register("transport", actionName, request);
+ execute(task, request, new ActionListener<Response>() {
+ @Override
+ public void onResponse(Response response) {
+ if (task != null) {
+ taskManager.unregister(task);
+ }
+ listener.onResponse(task, response);
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ if (task != null) {
+ taskManager.unregister(task);
+ }
+ listener.onFailure(task, e);
+ }
+ });
+ return task;
+ }
+
/**
* Use this method when the transport action should continue to run in the context of the current task
*/
public final void execute(Task task, Request request, ActionListener<Response> listener) {
-
ActionRequestValidationException validationException = request.validate();
if (validationException != null) {
listener.onFailure(validationException);
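The TaskListener overload surfaces the registered Task to the caller alongside the outcome. A minimal caller sketch; the listener's shape (onResponse(Task, Response), onFailure(Task, Throwable)) follows from the wrapper above, and `action`/`logger` are assumed to be in scope:

    action.execute(request, new TaskListener<Response>() {
        @Override
        public void onResponse(Task task, Response response) {
            logger.info("task [{}] completed", task.getId());
        }

        @Override
        public void onFailure(Task task, Throwable e) {
            logger.error("task failed", e);
        }
    });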
diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java b/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java
index 0863fbfc4f4..e851b7814da 100644
--- a/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java
+++ b/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java
@@ -25,12 +25,12 @@ import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.Collections;
import java.util.List;
+import java.util.Objects;
/**
- *
+ * An event received by the local node, signaling that the cluster state has changed.
*/
public class ClusterChangedEvent {
@@ -43,6 +43,9 @@ public class ClusterChangedEvent {
private final DiscoveryNodes.Delta nodesDelta;
public ClusterChangedEvent(String source, ClusterState state, ClusterState previousState) {
+ Objects.requireNonNull(source, "source must not be null");
+ Objects.requireNonNull(state, "state must not be null");
+ Objects.requireNonNull(previousState, "previousState must not be null");
this.source = source;
this.state = state;
this.previousState = previousState;
@@ -56,19 +59,35 @@ public class ClusterChangedEvent {
return this.source;
}
+ /**
+ * The new cluster state that caused this change event.
+ */
public ClusterState state() {
return this.state;
}
+ /**
+ * The previous cluster state for this change event.
+ */
public ClusterState previousState() {
return this.previousState;
}
+ /**
+ * Returns true iff the routing tables (for all indices) have
+ * changed between the previous cluster state and the current cluster state.
+ * Note that this is an object reference equality test, not an equals test.
+ */
public boolean routingTableChanged() {
return state.routingTable() != previousState.routingTable();
}
+ /**
+ * Returns true iff the routing table has changed for the given index.
+ * Note that this is an object reference equality test, not an equals test.
+ */
public boolean indexRoutingTableChanged(String index) {
+ Objects.requireNonNull(index, "index must not be null");
if (!state.routingTable().hasIndex(index) && !previousState.routingTable().hasIndex(index)) {
return false;
}
@@ -82,9 +101,6 @@ public class ClusterChangedEvent {
* Returns the indices created in this event
*/
public List<String> indicesCreated() {
- if (previousState == null) {
- return Arrays.asList(state.metaData().indices().keys().toArray(String.class));
- }
if (!metaDataChanged()) {
return Collections.emptyList();
}
@@ -105,20 +121,14 @@ public class ClusterChangedEvent {
* Returns the indices deleted in this event
*/
public List<String> indicesDeleted() {
-
- // if the new cluster state has a new master then we cannot know if an index which is not in the cluster state
- // is actually supposed to be deleted or imported as dangling instead. for example a new master might not have
- // the index in its cluster state because it was started with an empty data folder and in this case we want to
- // import as dangling. we check here for new master too to be on the safe side in this case.
- // This means that under certain conditions deleted indices might be reimported if a master fails while the deletion
- // request is issued and a node receives the cluster state that would trigger the deletion from the new master.
- // See test MetaDataWriteDataNodesTests.testIndicesDeleted()
+ // If the new cluster state has a new cluster UUID, the likely scenario is that a node was elected
+ // master that has had its data directory wiped out, in which case we don't want to delete the indices and lose data;
+ // rather we want to import them as dangling indices instead. So we check here if the cluster UUID differs from the previous
+ // cluster UUID, in which case, we don't want to delete indices that the master erroneously believes shouldn't exist.
+ // See test DiscoveryWithServiceDisruptionsIT.testIndicesDeleted()
// See discussion on https://github.com/elastic/elasticsearch/pull/9952 and
// https://github.com/elastic/elasticsearch/issues/11665
- if (hasNewMaster() || previousState == null) {
- return Collections.emptyList();
- }
- if (!metaDataChanged()) {
+ if (metaDataChanged() == false || isNewCluster()) {
return Collections.emptyList();
}
List<String> deleted = null;
@@ -134,10 +144,20 @@ public class ClusterChangedEvent {
return deleted == null ? Collections.emptyList() : deleted;
}
+ /**
+ * Returns true iff the metadata for the cluster has changed between
+ * the previous cluster state and the new cluster state. Note that this is an object
+ * reference equality test, not an equals test.
+ */
public boolean metaDataChanged() {
return state.metaData() != previousState.metaData();
}
+ /**
+ * Returns true iff the {@link IndexMetaData} for a given index
+ * has changed between the previous cluster state and the new cluster state.
+ * Note that this is an object reference equality test, not an equals test.
+ */
public boolean indexMetaDataChanged(IndexMetaData current) {
MetaData previousMetaData = previousState.metaData();
if (previousMetaData == null) {
@@ -152,46 +172,56 @@ public class ClusterChangedEvent {
return true;
}
+ /**
+ * Returns true iff the cluster level blocks have changed between cluster states.
+ * Note that this is an object reference equality test, not an equals test.
+ */
public boolean blocksChanged() {
return state.blocks() != previousState.blocks();
}
+ /**
+ * Returns true iff the local node is the master node of the cluster.
+ */
public boolean localNodeMaster() {
return state.nodes().localNodeMaster();
}
+ /**
+ * Returns the {@link org.elasticsearch.cluster.node.DiscoveryNodes.Delta} between
+ * the previous cluster state and the new cluster state.
+ */
public DiscoveryNodes.Delta nodesDelta() {
return this.nodesDelta;
}
+ /**
+ * Returns true iff nodes have been removed from the cluster since the last cluster state.
+ */
public boolean nodesRemoved() {
return nodesDelta.removed();
}
+ /**
+ * Returns true iff nodes have been added to the cluster since the last cluster state.
+ */
public boolean nodesAdded() {
return nodesDelta.added();
}
+ /**
+ * Returns true iff nodes have changed (been added or removed) since the last cluster state.
+ */
public boolean nodesChanged() {
return nodesRemoved() || nodesAdded();
}
- /**
- * Checks if this cluster state comes from a different master than the previous one.
- * This is a workaround for the scenario where a node misses a cluster state that has either
- * no master block or state not recovered flag set. In this case we must make sure that
- * if an index is missing from the cluster state is not deleted immediately but instead imported
- * as dangling. See discussion on https://github.com/elastic/elasticsearch/pull/9952
- */
- private boolean hasNewMaster() {
- String oldMaster = previousState().getNodes().masterNodeId();
- String newMaster = state().getNodes().masterNodeId();
- if (oldMaster == null && newMaster == null) {
- return false;
- }
- if (oldMaster == null && newMaster != null) {
- return true;
- }
- return oldMaster.equals(newMaster) == false;
+ // Determines whether or not the current cluster state represents an entirely
+ // different cluster from the previous cluster state, which will happen when a
+ // master node is elected that has never been part of the cluster before.
+ private boolean isNewCluster() {
+ final String prevClusterUUID = previousState.metaData().clusterUUID();
+ final String currClusterUUID = state.metaData().clusterUUID();
+ return prevClusterUUID.equals(currClusterUUID) == false;
}
-}
\ No newline at end of file
+}
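Because the newly documented accessors are reference-equality checks, they are cheap enough to guard expensive work in a ClusterStateListener. A sketch of typical usage:

    @Override
    public void clusterChanged(ClusterChangedEvent event) {
        if (event.metaDataChanged() == false && event.routingTableChanged() == false) {
            return; // nothing this listener cares about changed
        }
        for (String index : event.indicesDeleted()) {
            // clean up any per-index state here
        }
    }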
diff --git a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java
index d8504a210c1..44f2f4000bd 100644
--- a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java
+++ b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java
@@ -46,6 +46,11 @@ import static org.elasticsearch.common.transport.TransportAddressSerializers.add
*/
public class DiscoveryNode implements Streamable, ToXContent {
+ public static final String DATA_ATTR = "data";
+ public static final String MASTER_ATTR = "master";
+ public static final String CLIENT_ATTR = "client";
+ public static final String INGEST_ATTR = "ingest";
+
public static boolean localNode(Settings settings) {
if (Node.NODE_LOCAL_SETTING.exists(settings)) {
return Node.NODE_LOCAL_SETTING.get(settings);
@@ -274,7 +279,7 @@ public class DiscoveryNode implements Streamable, ToXContent {
* Should this node hold data (shards) or not.
*/
public boolean dataNode() {
- String data = attributes.get("data");
+ String data = attributes.get(DATA_ATTR);
if (data == null) {
return !clientNode();
}
@@ -292,7 +297,7 @@ public class DiscoveryNode implements Streamable, ToXContent {
* Is the node a client node or not.
*/
public boolean clientNode() {
- String client = attributes.get("client");
+ String client = attributes.get(CLIENT_ATTR);
return client != null && Booleans.parseBooleanExact(client);
}
@@ -304,7 +309,7 @@ public class DiscoveryNode implements Streamable, ToXContent {
* Can this node become master or not.
*/
public boolean masterNode() {
- String master = attributes.get("master");
+ String master = attributes.get(MASTER_ATTR);
if (master == null) {
return !clientNode();
}
@@ -322,7 +327,7 @@ public class DiscoveryNode implements Streamable, ToXContent {
* Returns a boolean that tells whether this an ingest node or not
*/
public boolean isIngestNode() {
- String ingest = attributes.get("ingest");
+ String ingest = attributes.get(INGEST_ATTR);
return ingest == null ? true : Booleans.parseBooleanExact(ingest);
}
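The extracted attribute keys can now be shared by code that builds node attributes instead of repeating string literals. A small sketch:

    Map<String, String> attributes = new HashMap<>();
    attributes.put(DiscoveryNode.MASTER_ATTR, "true");
    attributes.put(DiscoveryNode.DATA_ATTR, "false");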
diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
index 1459c3d8be8..fa8b8c4ac41 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
@@ -219,6 +219,9 @@ public final class ClusterSettings extends AbstractScopedSettings {
HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED,
HttpTransportSettings.SETTING_PIPELINING,
HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN,
+ HttpTransportSettings.SETTING_HTTP_HOST,
+ HttpTransportSettings.SETTING_HTTP_PUBLISH_HOST,
+ HttpTransportSettings.SETTING_HTTP_BIND_HOST,
HttpTransportSettings.SETTING_HTTP_PORT,
HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT,
HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS,
diff --git a/core/src/main/java/org/elasticsearch/http/HttpTransportSettings.java b/core/src/main/java/org/elasticsearch/http/HttpTransportSettings.java
index 0e362615f0c..6c91df079b9 100644
--- a/core/src/main/java/org/elasticsearch/http/HttpTransportSettings.java
+++ b/core/src/main/java/org/elasticsearch/http/HttpTransportSettings.java
@@ -47,7 +47,7 @@ public final class HttpTransportSettings {
public static final Setting<List<String>> SETTING_HTTP_BIND_HOST = listSetting("http.bind_host", SETTING_HTTP_HOST, s -> s, false, Scope.CLUSTER);
public static final Setting<PortsRange> SETTING_HTTP_PORT = new Setting<PortsRange>("http.port", "9200-9300", PortsRange::new, false, Scope.CLUSTER);
- public static final Setting<Integer> SETTING_HTTP_PUBLISH_PORT = Setting.intSetting("http.publish_port", 0, 0, false, Scope.CLUSTER);
+ public static final Setting<Integer> SETTING_HTTP_PUBLISH_PORT = Setting.intSetting("http.publish_port", -1, -1, false, Scope.CLUSTER);
public static final Setting<Boolean> SETTING_HTTP_DETAILED_ERRORS_ENABLED = Setting.boolSetting("http.detailed_errors.enabled", true, false, Scope.CLUSTER);
public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_CONTENT_LENGTH = Setting.byteSizeSetting("http.max_content_length", new ByteSizeValue(100, ByteSizeUnit.MB), false, Scope.CLUSTER);
public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_CHUNK_SIZE = Setting.byteSizeSetting("http.max_chunk_size", new ByteSizeValue(8, ByteSizeUnit.KB), false, Scope.CLUSTER);
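Moving the default from 0 to -1 frees 0 up to mean a real (ephemeral) port; -1 becomes the "unset" sentinel. A condensed sketch of the resolution, mirroring the NettyHttpServerTransport hunk that follows:

    int publishPort = SETTING_HTTP_PUBLISH_PORT.get(settings);
    if (publishPort < 0) {
        // Not configured: fall back to a port actually bound on the publish address.
        for (InetSocketTransportAddress boundAddress : boundAddresses) {
            InetAddress boundInetAddress = boundAddress.address().getAddress();
            if (boundInetAddress.isAnyLocalAddress() || boundInetAddress.equals(publishInetAddress)) {
                publishPort = boundAddress.address().getPort();
                break;
            }
        }
    }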
diff --git a/core/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java b/core/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java
index 79927c27632..e64c6401f71 100644
--- a/core/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java
+++ b/core/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java
@@ -19,6 +19,8 @@
package org.elasticsearch.http.netty;
+import com.carrotsearch.hppc.IntHashSet;
+import com.carrotsearch.hppc.IntSet;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
@@ -192,8 +194,6 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpServerTransport> {
boundAddresses, InetAddress publishInetAddress) {
+ int publishPort = SETTING_HTTP_PUBLISH_PORT.get(settings);
+
+ if (publishPort < 0) {
for (InetSocketTransportAddress boundAddress : boundAddresses) {
InetAddress boundInetAddress = boundAddress.address().getAddress();
if (boundInetAddress.isAnyLocalAddress() || boundInetAddress.equals(publishInetAddress)) {
@@ -343,13 +354,23 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpServerTransport>
diff --git a/core/src/main/java/org/elasticsearch/ingest/IngestMetadata.java b/core/src/main/java/org/elasticsearch/ingest/IngestMetadata.java
--- a/core/src/main/java/org/elasticsearch/ingest/IngestMetadata.java
+++ b/core/src/main/java/org/elasticsearch/ingest/IngestMetadata.java
-public final class IngestMetadata extends AbstractDiffable<MetaData.Custom> implements MetaData.Custom {
+public final class IngestMetadata implements MetaData.Custom {
public final static String TYPE = "ingest";
public final static IngestMetadata PROTO = new IngestMetadata();
@@ -50,7 +51,6 @@ public final class IngestMetadata extends AbstractDiffable<MetaData.Custom> implements MetaData.Custom
INGEST_METADATA_PARSER.declareObjectArray(List::addAll , PipelineConfiguration.getParser(), PIPELINES_FIELD);
}
-
// We can't use Pipeline class directly in cluster state, because we don't have the processor factories around when
// IngestMetadata is registered as custom metadata.
private final Map<String, PipelineConfiguration> pipelines;
@@ -73,7 +73,7 @@ public final class IngestMetadata extends AbstractDiffable<MetaData.Custom> implements MetaData.Custom
}
@Override
- public MetaData.Custom readFrom(StreamInput in) throws IOException {
+ public IngestMetadata readFrom(StreamInput in) throws IOException {
int size = in.readVInt();
Map<String, PipelineConfiguration> pipelines = new HashMap<>(size);
for (int i = 0; i < size; i++) {
@@ -92,7 +92,7 @@ public final class IngestMetadata extends AbstractDiffable<MetaData.Custom> implements MetaData.Custom
}
@Override
- public MetaData.Custom fromXContent(XContentParser parser) throws IOException {
+ public IngestMetadata fromXContent(XContentParser parser) throws IOException {
Map<String, PipelineConfiguration> pipelines = new HashMap<>();
List<PipelineConfiguration> configs = INGEST_METADATA_PARSER.parse(parser);
for (PipelineConfiguration pipeline : configs) {
@@ -116,4 +116,52 @@ public final class IngestMetadata extends AbstractDiffable<MetaData.Custom> implements MetaData.Custom
return MetaData.API_AND_GATEWAY;
}
+ @Override
+ public Diff<MetaData.Custom> diff(MetaData.Custom before) {
+ return new IngestMetadataDiff((IngestMetadata) before, this);
+ }
+
+ @Override
+ public Diff<MetaData.Custom> readDiffFrom(StreamInput in) throws IOException {
+ return new IngestMetadataDiff(in);
+ }
+
+ static class IngestMetadataDiff implements Diff<MetaData.Custom> {
+
+ final Diff<Map<String, PipelineConfiguration>> pipelines;
+
+ IngestMetadataDiff(IngestMetadata before, IngestMetadata after) {
+ this.pipelines = DiffableUtils.diff(before.pipelines, after.pipelines, DiffableUtils.getStringKeySerializer());
+ }
+
+ public IngestMetadataDiff(StreamInput in) throws IOException {
+ pipelines = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), PipelineConfiguration.PROTOTYPE);
+ }
+
+ @Override
+ public MetaData.Custom apply(MetaData.Custom part) {
+ return new IngestMetadata(pipelines.apply(((IngestMetadata) part).pipelines));
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ pipelines.writeTo(out);
+ }
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ IngestMetadata that = (IngestMetadata) o;
+
+ return pipelines.equals(that.pipelines);
+
+ }
+
+ @Override
+ public int hashCode() {
+ return pipelines.hashCode();
+ }
}
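The custom diff lets the pipeline map ship incrementally in cluster state updates instead of re-broadcasting the whole map. Conceptually (sketch using the methods added above):

    // A diff built from two versions reproduces the newer one when applied to the older.
    Diff<MetaData.Custom> diff = after.diff(before);
    IngestMetadata rebuilt = (IngestMetadata) diff.apply(before);
    assert rebuilt.equals(after);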
diff --git a/core/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java b/core/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java
index 3bd80edc306..d99d0e18c5c 100644
--- a/core/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java
+++ b/core/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java
@@ -19,6 +19,7 @@
package org.elasticsearch.ingest;
+import org.elasticsearch.cluster.AbstractDiffable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
@@ -37,9 +38,10 @@ import java.util.function.BiFunction;
/**
* Encapsulates a pipeline's id and configuration as a blob
*/
-public final class PipelineConfiguration implements Writeable<PipelineConfiguration>, ToXContent {
+public final class PipelineConfiguration extends AbstractDiffable<PipelineConfiguration>
+ implements Writeable<PipelineConfiguration>, ToXContent {
- private final static PipelineConfiguration PROTOTYPE = new PipelineConfiguration(null, null);
+ final static PipelineConfiguration PROTOTYPE = new PipelineConfiguration(null, null);
public static PipelineConfiguration readPipelineConfiguration(StreamInput in) throws IOException {
return PROTOTYPE.readFrom(in);
@@ -113,4 +115,22 @@ public final class PipelineConfiguration implements Writeable<PipelineConfiguration>
diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java
--- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java
- final Map<String, String> serviceAttributes = info.getServiceAttributes();
+ final Map<String, String> serviceAttributes = info == null ? null : info.getServiceAttributes();
if (serviceAttributes != null) {
table.addCell(serviceAttributes.getOrDefault("http_address", "-"));
} else {
diff --git a/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java
index 8d3ef1e8ead..bd014bccde2 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java
@@ -88,21 +88,35 @@ public class RestSearchAction extends BaseRestHandler {
@Override
public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws IOException {
- SearchRequest searchRequest;
- searchRequest = RestSearchAction.parseSearchRequest(queryRegistry, request, parseFieldMatcher, aggParsers);
+ SearchRequest searchRequest = new SearchRequest();
+ RestSearchAction.parseSearchRequest(searchRequest, queryRegistry, request, parseFieldMatcher, aggParsers, null);
client.search(searchRequest, new RestStatusToXContentListener<>(channel));
}
- public static SearchRequest parseSearchRequest(IndicesQueriesRegistry indicesQueriesRegistry, RestRequest request,
- ParseFieldMatcher parseFieldMatcher, AggregatorParsers aggParsers) throws IOException {
- String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
- SearchRequest searchRequest = new SearchRequest(indices);
+ /**
+ * Parses the rest request on top of the SearchRequest, preserving values
+ * that are not overridden by the rest request.
+ *
+ * @param restContent
+ * override body content to use for the request. If null body
+ * content is read from the request using
+ * RestActions.hasBodyContent.
+ */
+ public static void parseSearchRequest(SearchRequest searchRequest, IndicesQueriesRegistry indicesQueriesRegistry, RestRequest request,
+ ParseFieldMatcher parseFieldMatcher, AggregatorParsers aggParsers, BytesReference restContent) throws IOException {
+ if (searchRequest.source() == null) {
+ searchRequest.source(new SearchSourceBuilder());
+ }
+ searchRequest.indices(Strings.splitStringByCommaToArray(request.param("index")));
// get the content, and put it in the body
// add content/source as template if template flag is set
boolean isTemplateRequest = request.path().endsWith("/template");
- final SearchSourceBuilder builder;
- if (RestActions.hasBodyContent(request)) {
- BytesReference restContent = RestActions.getRestContent(request);
+ if (restContent == null) {
+ if (RestActions.hasBodyContent(request)) {
+ restContent = RestActions.getRestContent(request);
+ }
+ }
+ if (restContent != null) {
QueryParseContext context = new QueryParseContext(indicesQueriesRegistry);
if (isTemplateRequest) {
try (XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) {
@@ -111,12 +125,10 @@ public class RestSearchAction extends BaseRestHandler {
Template template = TemplateQueryParser.parse(parser, context.parseFieldMatcher(), "params", "template");
searchRequest.template(template);
}
- builder = null;
} else {
- builder = RestActions.getRestSearchSource(restContent, indicesQueriesRegistry, parseFieldMatcher, aggParsers);
+ RestActions.parseRestSearchSource(searchRequest.source(), restContent, indicesQueriesRegistry, parseFieldMatcher,
+ aggParsers);
}
- } else {
- builder = null;
}
// do not allow 'query_and_fetch' or 'dfs_query_and_fetch' search types
@@ -129,15 +141,7 @@ public class RestSearchAction extends BaseRestHandler {
} else {
searchRequest.searchType(searchType);
}
- if (builder == null) {
- SearchSourceBuilder extraBuilder = new SearchSourceBuilder();
- if (parseSearchSource(extraBuilder, request)) {
- searchRequest.source(extraBuilder);
- }
- } else {
- parseSearchSource(builder, request);
- searchRequest.source(builder);
- }
+ parseSearchSource(searchRequest.source(), request);
searchRequest.requestCache(request.paramAsBoolean("request_cache", null));
String scroll = request.param("scroll");
@@ -149,41 +153,35 @@ public class RestSearchAction extends BaseRestHandler {
searchRequest.routing(request.param("routing"));
searchRequest.preference(request.param("preference"));
searchRequest.indicesOptions(IndicesOptions.fromRequest(request, searchRequest.indicesOptions()));
-
- return searchRequest;
}
- private static boolean parseSearchSource(final SearchSourceBuilder searchSourceBuilder, RestRequest request) {
-
- boolean modified = false;
+ /**
+ * Parses the rest request on top of the SearchSourceBuilder, preserving
+ * values that are not overridden by the rest request.
+ */
+ private static void parseSearchSource(final SearchSourceBuilder searchSourceBuilder, RestRequest request) {
QueryBuilder<?> queryBuilder = RestActions.urlParamsToQueryBuilder(request);
if (queryBuilder != null) {
searchSourceBuilder.query(queryBuilder);
- modified = true;
}
int from = request.paramAsInt("from", -1);
if (from != -1) {
searchSourceBuilder.from(from);
- modified = true;
}
int size = request.paramAsInt("size", -1);
if (size != -1) {
searchSourceBuilder.size(size);
- modified = true;
}
if (request.hasParam("explain")) {
searchSourceBuilder.explain(request.paramAsBoolean("explain", null));
- modified = true;
}
if (request.hasParam("version")) {
searchSourceBuilder.version(request.paramAsBoolean("version", null));
- modified = true;
}
if (request.hasParam("timeout")) {
searchSourceBuilder.timeout(request.paramAsTime("timeout", null));
- modified = true;
}
if (request.hasParam("terminate_after")) {
int terminateAfter = request.paramAsInt("terminate_after",
@@ -192,7 +190,6 @@ public class RestSearchAction extends BaseRestHandler {
throw new IllegalArgumentException("terminateAfter must be > 0");
} else if (terminateAfter > 0) {
searchSourceBuilder.terminateAfter(terminateAfter);
- modified = true;
}
}
@@ -200,13 +197,11 @@ public class RestSearchAction extends BaseRestHandler {
if (sField != null) {
if (!Strings.hasText(sField)) {
searchSourceBuilder.noFields();
- modified = true;
} else {
String[] sFields = Strings.splitStringByCommaToArray(sField);
if (sFields != null) {
for (String field : sFields) {
searchSourceBuilder.field(field);
- modified = true;
}
}
}
@@ -218,7 +213,6 @@ public class RestSearchAction extends BaseRestHandler {
if (sFields != null) {
for (String field : sFields) {
searchSourceBuilder.fieldDataField(field);
- modified = true;
}
}
}
@@ -226,12 +220,10 @@ public class RestSearchAction extends BaseRestHandler {
FetchSourceContext fetchSourceContext = FetchSourceContext.parseFromRestRequest(request);
if (fetchSourceContext != null) {
searchSourceBuilder.fetchSource(fetchSourceContext);
- modified = true;
}
if (request.hasParam("track_scores")) {
searchSourceBuilder.trackScores(request.paramAsBoolean("track_scores", false));
- modified = true;
}
String sSorts = request.param("sort");
@@ -244,14 +236,11 @@ public class RestSearchAction extends BaseRestHandler {
String reverse = sort.substring(delimiter + 1);
if ("asc".equals(reverse)) {
searchSourceBuilder.sort(sortField, SortOrder.ASC);
- modified = true;
} else if ("desc".equals(reverse)) {
searchSourceBuilder.sort(sortField, SortOrder.DESC);
- modified = true;
}
} else {
searchSourceBuilder.sort(sort);
- modified = true;
}
}
}
@@ -259,7 +248,6 @@ public class RestSearchAction extends BaseRestHandler {
String sStats = request.param("stats");
if (sStats != null) {
searchSourceBuilder.stats(Arrays.asList(Strings.splitStringByCommaToArray(sStats)));
- modified = true;
}
String suggestField = request.param("suggest_field");
@@ -271,8 +259,6 @@ public class RestSearchAction extends BaseRestHandler {
termSuggestion(suggestField).field(suggestField)
.text(suggestText).size(suggestSize)
.suggestMode(SuggestMode.resolve(suggestMode))));
- modified = true;
}
- return modified;
}
}
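
The net effect of the RestSearchAction changes above: the modified bookkeeping and the
null-checked spare builder are gone because URL parameters are now always layered onto
searchRequest.source(). A condensed sketch of the resulting order of operations, reusing
the locals from the surrounding code and assuming searchRequest.source() is never null
after this change (not independently runnable):

    // body first, if the request carried one: parsed straight onto the
    // request's own SearchSourceBuilder
    RestActions.parseRestSearchSource(searchRequest.source(), restContent,
            indicesQueriesRegistry, parseFieldMatcher, aggParsers);
    // URL parameters second: a parameter that is present overwrites the body
    // value, a parameter that is absent preserves it
    parseSearchSource(searchRequest.source(), request);
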
diff --git a/core/src/main/java/org/elasticsearch/rest/action/support/RestActions.java b/core/src/main/java/org/elasticsearch/rest/action/support/RestActions.java
index d8055ba94c0..692a9dc3402 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/support/RestActions.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/support/RestActions.java
@@ -114,14 +114,14 @@ public class RestActions {
return queryBuilder;
}
- public static SearchSourceBuilder getRestSearchSource(BytesReference sourceBytes, IndicesQueriesRegistry queryRegistry,
+ public static void parseRestSearchSource(SearchSourceBuilder source, BytesReference sourceBytes, IndicesQueriesRegistry queryRegistry,
ParseFieldMatcher parseFieldMatcher, AggregatorParsers aggParsers)
throws IOException {
XContentParser parser = XContentFactory.xContent(sourceBytes).createParser(sourceBytes);
QueryParseContext queryParseContext = new QueryParseContext(queryRegistry);
queryParseContext.reset(parser);
queryParseContext.parseFieldMatcher(parseFieldMatcher);
- return SearchSourceBuilder.parseSearchSource(parser, queryParseContext, aggParsers);
+ source.parseXContent(parser, queryParseContext, aggParsers);
}
/**
diff --git a/core/src/main/java/org/elasticsearch/rest/action/support/RestToXContentListener.java b/core/src/main/java/org/elasticsearch/rest/action/support/RestToXContentListener.java
index 01f39662e3c..055158f542c 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/support/RestToXContentListener.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/support/RestToXContentListener.java
@@ -30,7 +30,7 @@ import org.elasticsearch.rest.RestStatus;
* A REST based action listener that assumes the response is of type {@link ToXContent} and automatically
* builds an XContent based response (wrapping the toXContent in startObject/endObject).
*/
-public final class RestToXContentListener<Response extends ToXContent> extends RestResponseListener<Response> {
+public class RestToXContentListener<Response extends ToXContent> extends RestResponseListener<Response> {
public RestToXContentListener(RestChannel channel) {
super(channel);
@@ -45,6 +45,10 @@ public final class RestToXContentListener extends R
builder.startObject();
response.toXContent(builder, channel.request());
builder.endObject();
- return new BytesRestResponse(RestStatus.OK, builder);
+ return new BytesRestResponse(getStatus(response), builder);
+ }
+
+ protected RestStatus getStatus(Response response) {
+ return RestStatus.OK;
}
}
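
Dropping final and adding getStatus lets response-specific listeners pick the HTTP status
code while reusing the XContent plumbing. A hypothetical subclass, not part of this diff
(RestCreatedToXContentListener is an invented name):

    import org.elasticsearch.common.xcontent.ToXContent;
    import org.elasticsearch.rest.RestChannel;
    import org.elasticsearch.rest.RestStatus;
    import org.elasticsearch.rest.action.support.RestToXContentListener;

    // Hypothetical: answers create-style APIs with 201 CREATED instead of 200 OK.
    public class RestCreatedToXContentListener<R extends ToXContent> extends RestToXContentListener<R> {
        public RestCreatedToXContentListener(RestChannel channel) {
            super(channel);
        }

        @Override
        protected RestStatus getStatus(R response) {
            return RestStatus.CREATED;
        }
    }
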
diff --git a/core/src/main/java/org/elasticsearch/search/SearchModule.java b/core/src/main/java/org/elasticsearch/search/SearchModule.java
index 8766bc7d1d3..2b39a2174b3 100644
--- a/core/src/main/java/org/elasticsearch/search/SearchModule.java
+++ b/core/src/main/java/org/elasticsearch/search/SearchModule.java
@@ -96,7 +96,7 @@ import org.elasticsearch.index.query.functionscore.random.RandomScoreFunctionPar
import org.elasticsearch.index.query.functionscore.script.ScriptScoreFunctionParser;
import org.elasticsearch.index.query.functionscore.weight.WeightBuilder;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
-import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.aggregations.AggregationBinaryParseElement;
import org.elasticsearch.search.aggregations.AggregationParseElement;
import org.elasticsearch.search.aggregations.AggregationPhase;
@@ -452,7 +452,7 @@ public class SearchModule extends AbstractModule {
bind(QueryPhase.class).asEagerSingleton();
bind(SearchPhaseController.class).asEagerSingleton();
bind(FetchPhase.class).asEagerSingleton();
- bind(SearchServiceTransportAction.class).asEagerSingleton();
+ bind(SearchTransportService.class).asEagerSingleton();
if (searchServiceImpl == SearchService.class) {
bind(SearchService.class).asEagerSingleton();
} else {
diff --git a/core/src/main/java/org/elasticsearch/search/action/SearchServiceTransportAction.java b/core/src/main/java/org/elasticsearch/search/action/SearchTransportService.java
similarity index 80%
rename from core/src/main/java/org/elasticsearch/search/action/SearchServiceTransportAction.java
rename to core/src/main/java/org/elasticsearch/search/action/SearchTransportService.java
index 81fa590908d..a15d40e2e02 100644
--- a/core/src/main/java/org/elasticsearch/search/action/SearchServiceTransportAction.java
+++ b/core/src/main/java/org/elasticsearch/search/action/SearchTransportService.java
@@ -23,7 +23,6 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionListenerResponseHandler;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.OriginalIndices;
-import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -58,7 +57,7 @@ import java.io.IOException;
* An encapsulation of {@link org.elasticsearch.search.SearchService} operations exposed through
* transport.
*/
-public class SearchServiceTransportAction extends AbstractComponent {
+public class SearchTransportService extends AbstractComponent {
public static final String FREE_CONTEXT_SCROLL_ACTION_NAME = "indices:data/read/search[free_context/scroll]";
public static final String FREE_CONTEXT_ACTION_NAME = "indices:data/read/search[free_context]";
@@ -77,26 +76,39 @@ public class SearchServiceTransportAction extends AbstractComponent {
private final SearchService searchService;
@Inject
- public SearchServiceTransportAction(Settings settings, TransportService transportService, SearchService searchService) {
+ public SearchTransportService(Settings settings, TransportService transportService, SearchService searchService) {
super(settings);
this.transportService = transportService;
this.searchService = searchService;
- transportService.registerRequestHandler(FREE_CONTEXT_SCROLL_ACTION_NAME, ScrollFreeContextRequest::new, ThreadPool.Names.SAME, new FreeContextTransportHandler<>());
- transportService.registerRequestHandler(FREE_CONTEXT_ACTION_NAME, SearchFreeContextRequest::new, ThreadPool.Names.SAME, new FreeContextTransportHandler());
- transportService.registerRequestHandler(CLEAR_SCROLL_CONTEXTS_ACTION_NAME, ClearScrollContextsRequest::new, ThreadPool.Names.SAME, new ClearScrollContextsTransportHandler());
- transportService.registerRequestHandler(DFS_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH, new SearchDfsTransportHandler());
- transportService.registerRequestHandler(QUERY_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH, new SearchQueryTransportHandler());
- transportService.registerRequestHandler(QUERY_ID_ACTION_NAME, QuerySearchRequest::new, ThreadPool.Names.SEARCH, new SearchQueryByIdTransportHandler());
- transportService.registerRequestHandler(QUERY_SCROLL_ACTION_NAME, InternalScrollSearchRequest::new, ThreadPool.Names.SEARCH, new SearchQueryScrollTransportHandler());
- transportService.registerRequestHandler(QUERY_FETCH_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH, new SearchQueryFetchTransportHandler());
- transportService.registerRequestHandler(QUERY_QUERY_FETCH_ACTION_NAME, QuerySearchRequest::new, ThreadPool.Names.SEARCH, new SearchQueryQueryFetchTransportHandler());
- transportService.registerRequestHandler(QUERY_FETCH_SCROLL_ACTION_NAME, InternalScrollSearchRequest::new, ThreadPool.Names.SEARCH, new SearchQueryFetchScrollTransportHandler());
- transportService.registerRequestHandler(FETCH_ID_SCROLL_ACTION_NAME, ShardFetchRequest::new, ThreadPool.Names.SEARCH, new FetchByIdTransportHandler<>());
- transportService.registerRequestHandler(FETCH_ID_ACTION_NAME, ShardFetchSearchRequest::new, ThreadPool.Names.SEARCH, new FetchByIdTransportHandler());
+ transportService.registerRequestHandler(FREE_CONTEXT_SCROLL_ACTION_NAME, ScrollFreeContextRequest::new, ThreadPool.Names.SAME,
+ new FreeContextTransportHandler<>());
+ transportService.registerRequestHandler(FREE_CONTEXT_ACTION_NAME, SearchFreeContextRequest::new, ThreadPool.Names.SAME,
+ new FreeContextTransportHandler<>());
+ transportService.registerRequestHandler(CLEAR_SCROLL_CONTEXTS_ACTION_NAME, ClearScrollContextsRequest::new, ThreadPool.Names.SAME,
+ new ClearScrollContextsTransportHandler());
+ transportService.registerRequestHandler(DFS_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH,
+ new SearchDfsTransportHandler());
+ transportService.registerRequestHandler(QUERY_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH,
+ new SearchQueryTransportHandler());
+ transportService.registerRequestHandler(QUERY_ID_ACTION_NAME, QuerySearchRequest::new, ThreadPool.Names.SEARCH,
+ new SearchQueryByIdTransportHandler());
+ transportService.registerRequestHandler(QUERY_SCROLL_ACTION_NAME, InternalScrollSearchRequest::new, ThreadPool.Names.SEARCH,
+ new SearchQueryScrollTransportHandler());
+ transportService.registerRequestHandler(QUERY_FETCH_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH,
+ new SearchQueryFetchTransportHandler());
+ transportService.registerRequestHandler(QUERY_QUERY_FETCH_ACTION_NAME, QuerySearchRequest::new, ThreadPool.Names.SEARCH,
+ new SearchQueryQueryFetchTransportHandler());
+ transportService.registerRequestHandler(QUERY_FETCH_SCROLL_ACTION_NAME, InternalScrollSearchRequest::new, ThreadPool.Names.SEARCH,
+ new SearchQueryFetchScrollTransportHandler());
+ transportService.registerRequestHandler(FETCH_ID_SCROLL_ACTION_NAME, ShardFetchRequest::new, ThreadPool.Names.SEARCH,
+ new FetchByIdTransportHandler<>());
+ transportService.registerRequestHandler(FETCH_ID_ACTION_NAME, ShardFetchSearchRequest::new, ThreadPool.Names.SEARCH,
+ new FetchByIdTransportHandler<>());
}
public void sendFreeContext(DiscoveryNode node, final long contextId, SearchRequest request) {
- transportService.sendRequest(node, FREE_CONTEXT_ACTION_NAME, new SearchFreeContextRequest(request, contextId), new ActionListenerResponseHandler<SearchFreeContextResponse>(new ActionListener<SearchFreeContextResponse>() {
+ transportService.sendRequest(node, FREE_CONTEXT_ACTION_NAME, new SearchFreeContextRequest(request, contextId),
+ new ActionListenerResponseHandler<SearchFreeContextResponse>(new ActionListener<SearchFreeContextResponse>() {
@Override
public void onResponse(SearchFreeContextResponse response) {
// no need to respond if it was freed or not
@@ -114,8 +126,9 @@ public class SearchServiceTransportAction extends AbstractComponent {
});
}
- public void sendFreeContext(DiscoveryNode node, long contextId, ClearScrollRequest request, final ActionListener<SearchFreeContextResponse> listener) {
- transportService.sendRequest(node, FREE_CONTEXT_SCROLL_ACTION_NAME, new ScrollFreeContextRequest(request, contextId), new ActionListenerResponseHandler<SearchFreeContextResponse>(listener) {
+ public void sendFreeContext(DiscoveryNode node, long contextId, final ActionListener<SearchFreeContextResponse> listener) {
+ transportService.sendRequest(node, FREE_CONTEXT_SCROLL_ACTION_NAME, new ScrollFreeContextRequest(contextId),
+ new ActionListenerResponseHandler<SearchFreeContextResponse>(listener) {
@Override
public SearchFreeContextResponse newInstance() {
return new SearchFreeContextResponse();
@@ -123,8 +136,9 @@ public class SearchServiceTransportAction extends AbstractComponent {
});
}
- public void sendClearAllScrollContexts(DiscoveryNode node, ClearScrollRequest request, final ActionListener<TransportResponse> listener) {
- transportService.sendRequest(node, CLEAR_SCROLL_CONTEXTS_ACTION_NAME, new ClearScrollContextsRequest(), new ActionListenerResponseHandler<TransportResponse>(listener) {
+ public void sendClearAllScrollContexts(DiscoveryNode node, final ActionListener<TransportResponse> listener) {
+ transportService.sendRequest(node, CLEAR_SCROLL_CONTEXTS_ACTION_NAME, new ClearScrollContextsRequest(),
+ new ActionListenerResponseHandler<TransportResponse>(listener) {
@Override
public TransportResponse newInstance() {
return TransportResponse.Empty.INSTANCE;
@@ -132,7 +146,8 @@ public class SearchServiceTransportAction extends AbstractComponent {
});
}
- public void sendExecuteDfs(DiscoveryNode node, final ShardSearchTransportRequest request, final ActionListener<DfsSearchResult> listener) {
+ public void sendExecuteDfs(DiscoveryNode node, final ShardSearchTransportRequest request,
+ final ActionListener<DfsSearchResult> listener) {
transportService.sendRequest(node, DFS_ACTION_NAME, request, new ActionListenerResponseHandler<DfsSearchResult>(listener) {
@Override
public DfsSearchResult newInstance() {
@@ -141,8 +156,10 @@ public class SearchServiceTransportAction extends AbstractComponent {
});
}
- public void sendExecuteQuery(DiscoveryNode node, final ShardSearchTransportRequest request, final ActionListener<QuerySearchResult> listener) {
- transportService.sendRequest(node, QUERY_ACTION_NAME, request, new ActionListenerResponseHandler<QuerySearchResult>(listener) {
+ public void sendExecuteQuery(DiscoveryNode node, final ShardSearchTransportRequest request,
+ final ActionListener<QuerySearchResult> listener) {
+ transportService.sendRequest(node, QUERY_ACTION_NAME, request,
+ new ActionListenerResponseHandler<QuerySearchResult>(listener) {
@Override
public QuerySearchResult newInstance() {
return new QuerySearchResult();
@@ -159,8 +176,10 @@ public class SearchServiceTransportAction extends AbstractComponent {
});
}
- public void sendExecuteQuery(DiscoveryNode node, final InternalScrollSearchRequest request, final ActionListener<ScrollQuerySearchResult> listener) {
- transportService.sendRequest(node, QUERY_SCROLL_ACTION_NAME, request, new ActionListenerResponseHandler<ScrollQuerySearchResult>(listener) {
+ public void sendExecuteQuery(DiscoveryNode node, final InternalScrollSearchRequest request,
+ final ActionListener<ScrollQuerySearchResult> listener) {
+ transportService.sendRequest(node, QUERY_SCROLL_ACTION_NAME, request,
+ new ActionListenerResponseHandler<ScrollQuerySearchResult>(listener) {
@Override
public ScrollQuerySearchResult newInstance() {
return new ScrollQuerySearchResult();
@@ -168,8 +187,10 @@ public class SearchServiceTransportAction extends AbstractComponent {
});
}
- public void sendExecuteFetch(DiscoveryNode node, final ShardSearchTransportRequest request, final ActionListener<QueryFetchSearchResult> listener) {
- transportService.sendRequest(node, QUERY_FETCH_ACTION_NAME, request, new ActionListenerResponseHandler<QueryFetchSearchResult>(listener) {
+ public void sendExecuteFetch(DiscoveryNode node, final ShardSearchTransportRequest request,
+ final ActionListener<QueryFetchSearchResult> listener) {
+ transportService.sendRequest(node, QUERY_FETCH_ACTION_NAME, request,
+ new ActionListenerResponseHandler<QueryFetchSearchResult>(listener) {
@Override
public QueryFetchSearchResult newInstance() {
return new QueryFetchSearchResult();
@@ -177,8 +198,10 @@ public class SearchServiceTransportAction extends AbstractComponent {
});
}
- public void sendExecuteFetch(DiscoveryNode node, final QuerySearchRequest request, final ActionListener<QueryFetchSearchResult> listener) {
- transportService.sendRequest(node, QUERY_QUERY_FETCH_ACTION_NAME, request, new ActionListenerResponseHandler<QueryFetchSearchResult>(listener) {
+ public void sendExecuteFetch(DiscoveryNode node, final QuerySearchRequest request,
+ final ActionListener<QueryFetchSearchResult> listener) {
+ transportService.sendRequest(node, QUERY_QUERY_FETCH_ACTION_NAME, request,
+ new ActionListenerResponseHandler<QueryFetchSearchResult>(listener) {
@Override
public QueryFetchSearchResult newInstance() {
return new QueryFetchSearchResult();
@@ -186,8 +209,10 @@ public class SearchServiceTransportAction extends AbstractComponent {
});
}
- public void sendExecuteFetch(DiscoveryNode node, final InternalScrollSearchRequest request, final ActionListener<ScrollQueryFetchSearchResult> listener) {
- transportService.sendRequest(node, QUERY_FETCH_SCROLL_ACTION_NAME, request, new ActionListenerResponseHandler<ScrollQueryFetchSearchResult>(listener) {
+ public void sendExecuteFetch(DiscoveryNode node, final InternalScrollSearchRequest request,
+ final ActionListener<ScrollQueryFetchSearchResult> listener) {
+ transportService.sendRequest(node, QUERY_FETCH_SCROLL_ACTION_NAME, request,
+ new ActionListenerResponseHandler<ScrollQueryFetchSearchResult>(listener) {
@Override
public ScrollQueryFetchSearchResult newInstance() {
return new ScrollQueryFetchSearchResult();
@@ -195,15 +220,18 @@ public class SearchServiceTransportAction extends AbstractComponent {
});
}
- public void sendExecuteFetch(DiscoveryNode node, final ShardFetchSearchRequest request, final ActionListener<FetchSearchResult> listener) {
+ public void sendExecuteFetch(DiscoveryNode node, final ShardFetchSearchRequest request,
+ final ActionListener<FetchSearchResult> listener) {
sendExecuteFetch(node, FETCH_ID_ACTION_NAME, request, listener);
}
- public void sendExecuteFetchScroll(DiscoveryNode node, final ShardFetchRequest request, final ActionListener<FetchSearchResult> listener) {
+ public void sendExecuteFetchScroll(DiscoveryNode node, final ShardFetchRequest request,
+ final ActionListener<FetchSearchResult> listener) {
sendExecuteFetch(node, FETCH_ID_SCROLL_ACTION_NAME, request, listener);
}
- private void sendExecuteFetch(DiscoveryNode node, String action, final ShardFetchRequest request, final ActionListener<FetchSearchResult> listener) {
+ private void sendExecuteFetch(DiscoveryNode node, String action, final ShardFetchRequest request,
+ final ActionListener<FetchSearchResult> listener) {
transportService.sendRequest(node, action, request, new ActionListenerResponseHandler<FetchSearchResult>(listener) {
@Override
public FetchSearchResult newInstance() {
@@ -212,17 +240,13 @@ public class SearchServiceTransportAction extends AbstractComponent {
});
}
- public static class ScrollFreeContextRequest extends TransportRequest {
+ static class ScrollFreeContextRequest extends TransportRequest {
private long id;
- public ScrollFreeContextRequest() {
+ ScrollFreeContextRequest() {
}
- ScrollFreeContextRequest(ClearScrollRequest request, long id) {
- this(id);
- }
-
- private ScrollFreeContextRequest(long id) {
+ ScrollFreeContextRequest(long id) {
this.id = id;
}
@@ -243,7 +267,7 @@ public class SearchServiceTransportAction extends AbstractComponent {
}
}
- public static class SearchFreeContextRequest extends ScrollFreeContextRequest implements IndicesRequest {
+ static class SearchFreeContextRequest extends ScrollFreeContextRequest implements IndicesRequest {
private OriginalIndices originalIndices;
public SearchFreeContextRequest() {
@@ -311,7 +335,8 @@ public class SearchServiceTransportAction extends AbstractComponent {
}
}
- class FreeContextTransportHandler<FreeContextRequest extends ScrollFreeContextRequest> implements TransportRequestHandler<FreeContextRequest> {
+ class FreeContextTransportHandler<FreeContextRequest extends ScrollFreeContextRequest>
+ implements TransportRequestHandler<FreeContextRequest> {
@Override
public void messageReceived(FreeContextRequest request, TransportChannel channel) throws Exception {
boolean freed = searchService.freeContext(request.id());
@@ -319,7 +344,7 @@ public class SearchServiceTransportAction extends AbstractComponent {
}
}
- public static class ClearScrollContextsRequest extends TransportRequest {
+ static class ClearScrollContextsRequest extends TransportRequest {
}
class ClearScrollContextsTransportHandler implements TransportRequestHandler<ClearScrollContextsRequest> {
@@ -393,5 +418,4 @@ public class SearchServiceTransportAction extends AbstractComponent {
channel.sendResponse(result);
}
}
-
}
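
With the ClearScrollRequest parameter removed, freeing a scroll context needs only the
node and the context id. A hedged sketch of a caller (node, contextId and logger are
placeholders for whatever the calling transport action has in scope):

    searchTransportService.sendFreeContext(node, contextId, new ActionListener<SearchFreeContextResponse>() {
        @Override
        public void onResponse(SearchFreeContextResponse response) {
            logger.debug("freed search context [{}]", contextId);
        }

        @Override
        public void onFailure(Throwable e) {
            logger.warn("failed to free search context [{}]", e, contextId);
        }
    });
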
diff --git a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java
index e1a9e9fe67b..a97bc2e21d7 100644
--- a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java
@@ -734,9 +734,22 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
return ext;
}
+ /**
+ * Create a new SearchSourceBuilder with attributes set by an xContent.
+ */
public SearchSourceBuilder fromXContent(XContentParser parser, QueryParseContext context, AggregatorParsers aggParsers)
throws IOException {
SearchSourceBuilder builder = new SearchSourceBuilder();
+ builder.parseXContent(parser, context, aggParsers);
+ return builder;
+ }
+
+ /**
+ * Parse some xContent into this SearchSourceBuilder, overwriting any values specified in the xContent. Use this if you need to set up
+ * different defaults than a regular SearchSourceBuilder would have and use
+ * {@link #fromXContent(XContentParser, QueryParseContext, AggregatorParsers)} if you have normal defaults.
+ */
+ public void parseXContent(XContentParser parser, QueryParseContext context, AggregatorParsers aggParsers) throws IOException {
XContentParser.Token token = parser.currentToken();
String currentFieldName = null;
if (token != XContentParser.Token.START_OBJECT && (token = parser.nextToken()) != XContentParser.Token.START_OBJECT) {
@@ -748,44 +761,42 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
currentFieldName = parser.currentName();
} else if (token.isValue()) {
if (context.parseFieldMatcher().match(currentFieldName, FROM_FIELD)) {
- builder.from = parser.intValue();
+ from = parser.intValue();
} else if (context.parseFieldMatcher().match(currentFieldName, SIZE_FIELD)) {
- builder.size = parser.intValue();
+ size = parser.intValue();
} else if (context.parseFieldMatcher().match(currentFieldName, TIMEOUT_FIELD)) {
- builder.timeoutInMillis = parser.longValue();
+ timeoutInMillis = parser.longValue();
} else if (context.parseFieldMatcher().match(currentFieldName, TERMINATE_AFTER_FIELD)) {
- builder.terminateAfter = parser.intValue();
+ terminateAfter = parser.intValue();
} else if (context.parseFieldMatcher().match(currentFieldName, MIN_SCORE_FIELD)) {
- builder.minScore = parser.floatValue();
+ minScore = parser.floatValue();
} else if (context.parseFieldMatcher().match(currentFieldName, VERSION_FIELD)) {
- builder.version = parser.booleanValue();
+ version = parser.booleanValue();
} else if (context.parseFieldMatcher().match(currentFieldName, EXPLAIN_FIELD)) {
- builder.explain = parser.booleanValue();
+ explain = parser.booleanValue();
} else if (context.parseFieldMatcher().match(currentFieldName, TRACK_SCORES_FIELD)) {
- builder.trackScores = parser.booleanValue();
+ trackScores = parser.booleanValue();
} else if (context.parseFieldMatcher().match(currentFieldName, _SOURCE_FIELD)) {
- builder.fetchSourceContext = FetchSourceContext.parse(parser, context);
+ fetchSourceContext = FetchSourceContext.parse(parser, context);
} else if (context.parseFieldMatcher().match(currentFieldName, FIELDS_FIELD)) {
- List<String> fieldNames = new ArrayList<>();
fieldNames.add(parser.text());
- builder.fieldNames = fieldNames;
} else if (context.parseFieldMatcher().match(currentFieldName, SORT_FIELD)) {
- builder.sort(parser.text());
+ sort(parser.text());
} else if (context.parseFieldMatcher().match(currentFieldName, PROFILE_FIELD)) {
- builder.profile = parser.booleanValue();
+ profile = parser.booleanValue();
} else {
throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].",
parser.getTokenLocation());
}
} else if (token == XContentParser.Token.START_OBJECT) {
if (context.parseFieldMatcher().match(currentFieldName, QUERY_FIELD)) {
- builder.queryBuilder = context.parseInnerQueryBuilder();
+ queryBuilder = context.parseInnerQueryBuilder();
} else if (context.parseFieldMatcher().match(currentFieldName, POST_FILTER_FIELD)) {
- builder.postQueryBuilder = context.parseInnerQueryBuilder();
+ postQueryBuilder = context.parseInnerQueryBuilder();
} else if (context.parseFieldMatcher().match(currentFieldName, _SOURCE_FIELD)) {
- builder.fetchSourceContext = FetchSourceContext.parse(parser, context);
+ fetchSourceContext = FetchSourceContext.parse(parser, context);
} else if (context.parseFieldMatcher().match(currentFieldName, SCRIPT_FIELDS_FIELD)) {
- List<ScriptField> scriptFields = new ArrayList<>();
+ scriptFields = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
String scriptFieldName = parser.currentName();
token = parser.nextToken();
@@ -822,9 +833,8 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
+ currentFieldName + "] but found [" + token + "]", parser.getTokenLocation());
}
}
- builder.scriptFields = scriptFields;
} else if (context.parseFieldMatcher().match(currentFieldName, INDICES_BOOST_FIELD)) {
- ObjectFloatHashMap<String> indexBoost = new ObjectFloatHashMap<String>();
+ indexBoost = new ObjectFloatHashMap<String>();
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
@@ -835,25 +845,23 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
parser.getTokenLocation());
}
}
- builder.indexBoost = indexBoost;
} else if (context.parseFieldMatcher().match(currentFieldName, AGGREGATIONS_FIELD)) {
- builder.aggregations = aggParsers.parseAggregators(parser, context);
+ aggregations = aggParsers.parseAggregators(parser, context);
} else if (context.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_FIELD)) {
- builder.highlightBuilder = HighlightBuilder.PROTOTYPE.fromXContent(context);
+ highlightBuilder = HighlightBuilder.PROTOTYPE.fromXContent(context);
} else if (context.parseFieldMatcher().match(currentFieldName, INNER_HITS_FIELD)) {
XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
- builder.innerHitsBuilder = xContentBuilder.bytes();
+ innerHitsBuilder = xContentBuilder.bytes();
} else if (context.parseFieldMatcher().match(currentFieldName, SUGGEST_FIELD)) {
XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
- builder.suggestBuilder = xContentBuilder.bytes();
+ suggestBuilder = xContentBuilder.bytes();
} else if (context.parseFieldMatcher().match(currentFieldName, SORT_FIELD)) {
- List<BytesReference> sorts = new ArrayList<>();
+ sorts = new ArrayList<>();
XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
sorts.add(xContentBuilder.bytes());
- builder.sorts = sorts;
} else if (context.parseFieldMatcher().match(currentFieldName, EXT_FIELD)) {
XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
- builder.ext = xContentBuilder.bytes();
+ ext = xContentBuilder.bytes();
} else {
throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].",
parser.getTokenLocation());
@@ -861,7 +869,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
} else if (token == XContentParser.Token.START_ARRAY) {
if (context.parseFieldMatcher().match(currentFieldName, FIELDS_FIELD)) {
- List<String> fieldNames = new ArrayList<>();
+ fieldNames = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token == XContentParser.Token.VALUE_STRING) {
fieldNames.add(parser.text());
@@ -870,9 +878,8 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
+ currentFieldName + "] but found [" + token + "]", parser.getTokenLocation());
}
}
- builder.fieldNames = fieldNames;
} else if (context.parseFieldMatcher().match(currentFieldName, FIELDDATA_FIELDS_FIELD)) {
- List<String> fieldDataFields = new ArrayList<>();
+ fieldDataFields = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token == XContentParser.Token.VALUE_STRING) {
fieldDataFields.add(parser.text());
@@ -881,22 +888,19 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
+ currentFieldName + "] but found [" + token + "]", parser.getTokenLocation());
}
}
- builder.fieldDataFields = fieldDataFields;
} else if (context.parseFieldMatcher().match(currentFieldName, SORT_FIELD)) {
- List<BytesReference> sorts = new ArrayList<>();
+ sorts = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
sorts.add(xContentBuilder.bytes());
}
- builder.sorts = sorts;
} else if (context.parseFieldMatcher().match(currentFieldName, RESCORE_FIELD)) {
- List<RescoreBuilder<?>> rescoreBuilders = new ArrayList<>();
+ rescoreBuilders = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
rescoreBuilders.add(RescoreBuilder.parseFromXContent(context));
}
- builder.rescoreBuilders = rescoreBuilders;
} else if (context.parseFieldMatcher().match(currentFieldName, STATS_FIELD)) {
- List<String> stats = new ArrayList<>();
+ stats = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token == XContentParser.Token.VALUE_STRING) {
stats.add(parser.text());
@@ -905,11 +909,10 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
+ currentFieldName + "] but found [" + token + "]", parser.getTokenLocation());
}
}
- builder.stats = stats;
} else if (context.parseFieldMatcher().match(currentFieldName, _SOURCE_FIELD)) {
- builder.fetchSourceContext = FetchSourceContext.parse(parser, context);
+ fetchSourceContext = FetchSourceContext.parse(parser, context);
} else if (context.parseFieldMatcher().match(currentFieldName, SEARCH_AFTER)) {
- builder.searchAfterBuilder = SearchAfterBuilder.PROTOTYPE.fromXContent(parser, context.parseFieldMatcher());
+ searchAfterBuilder = SearchAfterBuilder.PROTOTYPE.fromXContent(parser, context.parseFieldMatcher());
} else {
throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].",
parser.getTokenLocation());
@@ -919,7 +922,6 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
parser.getTokenLocation());
}
}
- return builder;
}
@Override
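
The split between fromXContent (fresh builder, normal defaults) and parseXContent (parse
on top of existing state) is what makes the RestSearchAction layering earlier in this diff
work. A minimal sketch, assuming a QueryParseContext and AggregatorParsers wired up the
same way as in RestActions.parseRestSearchSource:

    SearchSourceBuilder source = new SearchSourceBuilder();
    source.size(5);                                  // non-default value set up front
    String body = "{\"from\": 10}";
    XContentParser parser = XContentFactory.xContent(body).createParser(body);
    source.parseXContent(parser, queryParseContext, aggParsers);
    // only keys present in the body are overwritten: from() == 10, size() still == 5;
    // fromXContent would instead hand back a fresh builder with the default size
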
diff --git a/core/src/main/java/org/elasticsearch/tasks/CancellableTask.java b/core/src/main/java/org/elasticsearch/tasks/CancellableTask.java
index 8916a8be7cb..9e5506b2f14 100644
--- a/core/src/main/java/org/elasticsearch/tasks/CancellableTask.java
+++ b/core/src/main/java/org/elasticsearch/tasks/CancellableTask.java
@@ -19,6 +19,8 @@
package org.elasticsearch.tasks;
+import org.elasticsearch.common.Nullable;
+
import java.util.concurrent.atomic.AtomicReference;
/**
@@ -56,4 +58,11 @@ public class CancellableTask extends Task {
return reason.get() != null;
}
+ /**
+ * The reason the task was cancelled or null if it hasn't been cancelled.
+ */
+ @Nullable
+ public String getReasonCancelled() {
+ return reason.get();
+ }
}
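
A hedged sketch of how a long-running, batched operation might consume the new accessor
between units of work (hasMoreBatches and processNextBatch are illustrative):

    while (hasMoreBatches()) {
        String reasonCancelled = task.getReasonCancelled();
        if (reasonCancelled != null) {
            listener.onFailure(new IllegalStateException("task cancelled [" + reasonCancelled + "]"));
            return;
        }
        processNextBatch();
    }
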
diff --git a/core/src/main/java/org/elasticsearch/tasks/LoggingTaskListener.java b/core/src/main/java/org/elasticsearch/tasks/LoggingTaskListener.java
new file mode 100644
index 00000000000..b2016f094f5
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/tasks/LoggingTaskListener.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.tasks;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+/**
+ * A TaskListener that just logs the response at the info level. Used when we
+ * need a listener but aren't returning the result to the user.
+ */
+public final class LoggingTaskListener<Response> implements TaskListener<Response> {
+ private final static ESLogger logger = Loggers.getLogger(LoggingTaskListener.class);
+
+ /**
+ * Get the instance of LoggingTaskListener cast appropriately.
+ */
+ @SuppressWarnings("unchecked") // Safe because we only toString the response
+ public static <Response> TaskListener<Response> instance() {
+ return (TaskListener<Response>) INSTANCE;
+ }
+
+ private static final LoggingTaskListener<Object> INSTANCE = new LoggingTaskListener<Object>();
+
+ private LoggingTaskListener() {
+ }
+
+ @Override
+ public void onResponse(Task task, Response response) {
+ logger.info("{} finished with response {}", task.getId(), response);
+ }
+
+ @Override
+ public void onFailure(Task task, Throwable e) {
+ logger.warn("{} failed with exception", e, task.getId());
+ }
+}
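
Because this listener only ever calls toString() on the response, the single INSTANCE can
safely be handed out at any response type, which is all the unchecked cast in instance()
relies on (the response types here are just examples):

    TaskListener<BulkResponse> bulkListener = LoggingTaskListener.instance();
    TaskListener<SearchResponse> searchListener = LoggingTaskListener.instance();
    // both variables point at the same singleton; neither response type is ever inspected
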
diff --git a/core/src/main/java/org/elasticsearch/tasks/TaskListener.java b/core/src/main/java/org/elasticsearch/tasks/TaskListener.java
new file mode 100644
index 00000000000..6a0c36e0b83
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/tasks/TaskListener.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.tasks;
+
+/**
+ * Listener for Task success or failure.
+ */
+public interface TaskListener<Response> {
+ /**
+ * Handle task response. This response may constitute a failure or a success
+ * but it is up to the listener to make that decision.
+ *
+ * @param task
+ * the task being executed. May be null if the action doesn't
+ * create a task
+ * @param response
+ * the response from the action that executed the task
+ */
+ void onResponse(Task task, Response response);
+
+ /**
+ * A failure caused by an exception at some phase of the task.
+ *
+ * @param task
+ * the task being executed. May be null if the action doesn't
+ * create a task
+ * @param e
+ * the failure
+ */
+ void onFailure(Task task, Throwable e);
+
+}
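
When a caller already holds an ActionListener and does not care about the Task, a small
adapter bridges the two interfaces; an illustrative sketch, not part of this diff:

    TaskListener<SearchResponse> taskListener = new TaskListener<SearchResponse>() {
        @Override
        public void onResponse(Task task, SearchResponse response) {
            actionListener.onResponse(response);  // drop the task, forward the response
        }

        @Override
        public void onFailure(Task task, Throwable e) {
            actionListener.onFailure(e);
        }
    };
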
diff --git a/core/src/main/java/org/elasticsearch/transport/BindTransportException.java b/core/src/main/java/org/elasticsearch/transport/BindTransportException.java
index 66591c7e501..4f55c04a1b1 100644
--- a/core/src/main/java/org/elasticsearch/transport/BindTransportException.java
+++ b/core/src/main/java/org/elasticsearch/transport/BindTransportException.java
@@ -35,4 +35,8 @@ public class BindTransportException extends TransportException {
public BindTransportException(String message, Throwable cause) {
super(message, cause);
}
-}
\ No newline at end of file
+
+ public BindTransportException(String message) {
+ super(message);
+ }
+}
diff --git a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java
index 685fdeda683..e3038feaee7 100644
--- a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java
+++ b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java
@@ -19,6 +19,8 @@
package org.elasticsearch.transport.netty;
+import com.carrotsearch.hppc.IntHashSet;
+import com.carrotsearch.hppc.IntSet;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -535,8 +537,16 @@ public class NettyTransport extends AbstractLifecycleComponent implem
throw new BindTransportException("Failed to resolve publish address", e);
}
+ final int publishPort = resolvePublishPort(name, settings, profileSettings, boundAddresses, publishInetAddress);
+ final TransportAddress publishAddress = new InetSocketTransportAddress(new InetSocketAddress(publishInetAddress, publishPort));
+ return new BoundTransportAddress(transportBoundAddresses, publishAddress);
+ }
+
+ // package private for tests
+ static int resolvePublishPort(String profileName, Settings settings, Settings profileSettings, List<InetSocketAddress> boundAddresses,
+ InetAddress publishInetAddress) {
int publishPort;
- if (TransportSettings.DEFAULT_PROFILE.equals(name)) {
+ if (TransportSettings.DEFAULT_PROFILE.equals(profileName)) {
publishPort = TransportSettings.PUBLISH_PORT.get(settings);
} else {
publishPort = profileSettings.getAsInt("publish_port", -1);
@@ -553,17 +563,25 @@ public class NettyTransport extends AbstractLifecycleComponent implem
}
}
- // if port still not matches, just take port of first bound address
+ // if no matching boundAddress found, check if there is a unique port for all bound addresses
if (publishPort < 0) {
- // TODO: In case of DEFAULT_PROFILE we should probably fail here, as publish address does not match any bound address
- // In case of a custom profile, we might use the publish address of the default profile
- publishPort = boundAddresses.get(0).getPort();
- logger.warn("Publish port not found by matching publish address [{}] to bound addresses [{}], "
- + "falling back to port [{}] of first bound address", publishInetAddress, boundAddresses, publishPort);
+ final IntSet ports = new IntHashSet();
+ for (InetSocketAddress boundAddress : boundAddresses) {
+ ports.add(boundAddress.getPort());
+ }
+ if (ports.size() == 1) {
+ publishPort = ports.iterator().next().value;
+ }
}
- final TransportAddress publishAddress = new InetSocketTransportAddress(new InetSocketAddress(publishInetAddress, publishPort));
- return new BoundTransportAddress(transportBoundAddresses, publishAddress);
+ if (publishPort < 0) {
+ String profileExplanation = TransportSettings.DEFAULT_PROFILE.equals(profileName) ? "" : " for profile " + profileName;
+ throw new BindTransportException("Failed to auto-resolve publish port" + profileExplanation + ", multiple bound addresses " +
+ boundAddresses + " with distinct ports and none of them matched the publish address (" + publishInetAddress + "). " +
+ "Please specify a unique port by setting " + TransportSettings.PORT.getKey() + " or " +
+ TransportSettings.PUBLISH_PORT.getKey());
+ }
+ return publishPort;
}
private void createServerBootstrap(String name, Settings settings) {
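
resolvePublishPort is package private so tests can drive its three cases directly. A
hedged sketch (addresses and settings are illustrative; the call can throw
BindTransportException and the address lookups can throw UnknownHostException):

    List<InetSocketAddress> bound = Arrays.asList(
            new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 9300),
            new InetSocketAddress(InetAddress.getByName("192.168.0.1"), 9301));

    // 1. the publish address matches a bound address -> that address's port (9301) wins
    int port = NettyTransport.resolvePublishPort(TransportSettings.DEFAULT_PROFILE,
            Settings.EMPTY, Settings.EMPTY, bound, InetAddress.getByName("192.168.0.1"));

    // 2. no match, but every bound address shares a single port -> that port wins
    // 3. no match and the bound ports differ -> the new BindTransportException above,
    //    asking the user to pin a port via the settings named in its message
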
diff --git a/core/src/main/resources/org/elasticsearch/plugins/plugin-install.help b/core/src/main/resources/org/elasticsearch/plugins/plugin-install.help
index 7037974ede3..ba39e1ab8fb 100644
--- a/core/src/main/resources/org/elasticsearch/plugins/plugin-install.help
+++ b/core/src/main/resources/org/elasticsearch/plugins/plugin-install.help
@@ -55,5 +55,5 @@ OPTIONS
-v,--verbose Verbose output
-h,--help Shows this message
-
+
-b,--batch Enable batch mode explicitly, automatic confirmation of security permissions
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilderTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilderTests.java
index 97375061de5..ad17689bb78 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilderTests.java
@@ -22,8 +22,8 @@ package org.elasticsearch.action.admin.indices.create;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
-import org.elasticsearch.rest.NoOpClient;
import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.client.NoOpClient;
import org.junit.After;
import org.junit.Before;
diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkShardRequestTests.java b/core/src/test/java/org/elasticsearch/action/bulk/BulkShardRequestTests.java
new file mode 100644
index 00000000000..3ad343e2469
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkShardRequestTests.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.bulk;
+
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ESTestCase;
+
+import static org.apache.lucene.util.TestUtil.randomSimpleString;
+
+public class BulkShardRequestTests extends ESTestCase {
+ public void testToString() {
+ String index = randomSimpleString(getRandom(), 10);
+ int count = between(1, 100);
+ BulkShardRequest r = new BulkShardRequest(null, new ShardId(index, "ignored", 0), false, new BulkItemRequest[count]);
+ assertEquals("BulkShardRequest to [" + index + "] containing [" + count + "] requests", r.toString());
+ r = new BulkShardRequest(null, new ShardId(index, "ignored", 0), true, new BulkItemRequest[count]);
+ assertEquals("BulkShardRequest to [" + index + "] containing [" + count + "] requests and a refresh", r.toString());
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java b/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java
index ebb3b5211f1..6d9987394f9 100644
--- a/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java
+++ b/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java
@@ -25,8 +25,8 @@ import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
-import org.elasticsearch.rest.NoOpClient;
import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.client.NoOpClient;
import org.junit.After;
import org.junit.Before;
diff --git a/core/src/test/java/org/elasticsearch/action/index/IndexRequestBuilderTests.java b/core/src/test/java/org/elasticsearch/action/index/IndexRequestBuilderTests.java
index badb79e21b7..06e9d586e36 100644
--- a/core/src/test/java/org/elasticsearch/action/index/IndexRequestBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/action/index/IndexRequestBuilderTests.java
@@ -23,8 +23,8 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
-import org.elasticsearch.rest.NoOpClient;
import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.client.NoOpClient;
import org.junit.After;
import org.junit.Before;
diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java b/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java
new file mode 100644
index 00000000000..cefd3a6703a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java
@@ -0,0 +1,375 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.DummyTransportAddress;
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for the {@link ClusterChangedEvent} class.
+ */
+public class ClusterChangedEventTests extends ESTestCase {
+
+ private static final ClusterName TEST_CLUSTER_NAME = new ClusterName("test");
+ private static final int INDICES_CHANGE_NUM_TESTS = 5;
+ private static final String NODE_ID_PREFIX = "node_";
+ private static final String INITIAL_CLUSTER_ID = Strings.randomBase64UUID();
+ // the initial indices which every cluster state test starts out with
+ private static final List<String> initialIndices = Arrays.asList("idx1", "idx2", "idx3");
+ // index settings
+ private static final Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
+
+ /**
+ * Test basic properties of the ClusterChangedEvent class:
+ * (1) make sure there are no null values for any of its properties
+ * (2) make sure you can't create a ClusterChangedEvent with any null values
+ */
+ public void testBasicProperties() {
+ ClusterState newState = createSimpleClusterState();
+ ClusterState previousState = createSimpleClusterState();
+ ClusterChangedEvent event = new ClusterChangedEvent("_na_", newState, previousState);
+ assertThat(event.source(), equalTo("_na_"));
+ assertThat(event.state(), equalTo(newState));
+ assertThat(event.previousState(), equalTo(previousState));
+ assertNotNull("nodesDelta should not be null", event.nodesDelta());
+
+ // should not be able to create a ClusterChangedEvent with null values for any of the constructor args
+ try {
+ event = new ClusterChangedEvent(null, newState, previousState);
+ fail("should not have created a ClusterChangedEvent from a null source: " + event.source());
+ } catch (NullPointerException e) {
+ }
+ try {
+ event = new ClusterChangedEvent("_na_", null, previousState);
+ fail("should not have created a ClusterChangedEvent from a null state: " + event.state());
+ } catch (NullPointerException e) {
+ }
+ try {
+ event = new ClusterChangedEvent("_na_", newState, null);
+ fail("should not have created a ClusterChangedEvent from a null previousState: " + event.previousState());
+ } catch (NullPointerException e) {
+ }
+ }
+
+ /**
+ * Test whether the ClusterChangedEvent returns the correct value for whether the local node is master,
+ * based on what was set on the cluster state.
+ */
+ public void testLocalNodeIsMaster() {
+ final int numNodesInCluster = 3;
+ ClusterState previousState = createSimpleClusterState();
+ ClusterState newState = createState(numNodesInCluster, true, initialIndices);
+ ClusterChangedEvent event = new ClusterChangedEvent("_na_", newState, previousState);
+ assertTrue("local node should be master", event.localNodeMaster());
+
+ newState = createState(numNodesInCluster, false, initialIndices);
+ event = new ClusterChangedEvent("_na_", newState, previousState);
+ assertFalse("local node should not be master", event.localNodeMaster());
+ }
+
+ /**
+ * Test that the indices created and indices deleted lists between two cluster states
+ * are correct when there is no change in the cluster UUID. Also tests metadata equality
+ * between cluster states.
+ */
+ public void testMetaDataChangesOnNoMasterChange() {
+ metaDataChangesCheck(false);
+ }
+
+ /**
+ * Test that the indices created and indices deleted lists between two cluster states
+ * are correct when there is a change in the cluster UUID. Also tests metadata equality
+ * between cluster states.
+ */
+ public void testMetaDataChangesOnNewClusterUUID() {
+ metaDataChangesCheck(true);
+ }
+
+ /**
+ * Test the index metadata change check.
+ */
+ public void testIndexMetaDataChange() {
+ final int numNodesInCluster = 3;
+ final ClusterState originalState = createState(numNodesInCluster, randomBoolean(), initialIndices);
+ final ClusterState newState = originalState; // doesn't matter for this test, just need a non-null value
+ final ClusterChangedEvent event = new ClusterChangedEvent("_na_", originalState, newState);
+
+ // test when its not the same IndexMetaData
+ final String indexId = initialIndices.get(0);
+ final IndexMetaData originalIndexMeta = originalState.metaData().index(indexId);
+ // make sure the metadata is actually on the cluster state
+ assertNotNull("IndexMetaData for " + indexId + " should exist on the cluster state", originalIndexMeta);
+ IndexMetaData newIndexMeta = createIndexMetadata(indexId, originalIndexMeta.getVersion() + 1);
+ assertTrue("IndexMetaData with different version numbers must be considered changed", event.indexMetaDataChanged(newIndexMeta));
+
+ // test when it doesn't exist
+ newIndexMeta = createIndexMetadata("doesntexist");
+ assertTrue("IndexMetaData that didn't previously exist should be considered changed", event.indexMetaDataChanged(newIndexMeta));
+
+ // test when its the same IndexMetaData
+ assertFalse("IndexMetaData should be the same", event.indexMetaDataChanged(originalIndexMeta));
+ }
+
+ /**
+ * Test nodes added/removed/changed checks.
+ */
+ public void testNodesAddedAndRemovedAndChanged() {
+ final int numNodesInCluster = 4;
+ final ClusterState originalState = createState(numNodesInCluster, randomBoolean(), initialIndices);
+
+ // test when nodes have not been added or removed between cluster states
+ ClusterState newState = createState(numNodesInCluster, randomBoolean(), initialIndices);
+ ClusterChangedEvent event = new ClusterChangedEvent("_na_", newState, originalState);
+ assertFalse("Nodes should not have been added between cluster states", event.nodesAdded());
+ assertFalse("Nodes should not have been removed between cluster states", event.nodesRemoved());
+ assertFalse("Nodes should not have been changed between cluster states", event.nodesChanged());
+
+ // test when nodes have been removed between cluster states
+ newState = createState(numNodesInCluster - 1, randomBoolean(), initialIndices);
+ event = new ClusterChangedEvent("_na_", newState, originalState);
+ assertTrue("Nodes should have been removed between cluster states", event.nodesRemoved());
+ assertFalse("Nodes should not have been added between cluster states", event.nodesAdded());
+ assertTrue("Nodes should have been changed between cluster states", event.nodesChanged());
+
+ // test when nodes have been added between cluster states
+ newState = createState(numNodesInCluster + 1, randomBoolean(), initialIndices);
+ event = new ClusterChangedEvent("_na_", newState, originalState);
+ assertFalse("Nodes should not have been removed between cluster states", event.nodesRemoved());
+ assertTrue("Nodes should have been added between cluster states", event.nodesAdded());
+ assertTrue("Nodes should have been changed between cluster states", event.nodesChanged());
+
+ // test when nodes both added and removed between cluster states
+ // here we reuse the newState from the previous run which already added extra nodes
+ newState = nextState(newState, randomBoolean(), Collections.emptyList(), Collections.emptyList(), 1);
+ event = new ClusterChangedEvent("_na_", newState, originalState);
+ assertTrue("Nodes should have been removed between cluster states", event.nodesRemoved());
+ assertTrue("Nodes should have been added between cluster states", event.nodesAdded());
+ assertTrue("Nodes should have been changed between cluster states", event.nodesChanged());
+ }
+
+ /**
+ * Test the routing table changes checks.
+ */
+ public void testRoutingTableChanges() {
+ final int numNodesInCluster = 3;
+ final ClusterState originalState = createState(numNodesInCluster, randomBoolean(), initialIndices);
+
+ // routing tables and index routing tables are same object
+ ClusterState newState = ClusterState.builder(originalState).build();
+ ClusterChangedEvent event = new ClusterChangedEvent("_na_", originalState, newState);
+ assertFalse("routing tables should be the same object", event.routingTableChanged());
+ assertFalse("index routing table should be the same object", event.indexRoutingTableChanged(initialIndices.get(0)));
+
+ // routing tables and index routing tables aren't same object
+ newState = createState(numNodesInCluster, randomBoolean(), initialIndices);
+ event = new ClusterChangedEvent("_na_", originalState, newState);
+ assertTrue("routing tables should not be the same object", event.routingTableChanged());
+ assertTrue("index routing table should not be the same object", event.indexRoutingTableChanged(initialIndices.get(0)));
+
+ // index routing tables are different because they don't exist
+ newState = createState(numNodesInCluster, randomBoolean(), initialIndices.subList(1, initialIndices.size()));
+ event = new ClusterChangedEvent("_na_", originalState, newState);
+ assertTrue("routing tables should not be the same object", event.routingTableChanged());
+ assertTrue("index routing table should not be the same object", event.indexRoutingTableChanged(initialIndices.get(0)));
+ }
+
+ // Tests that the indices change list is correct as well as metadata equality when the metadata has changed.
+ private static void metaDataChangesCheck(final boolean changeClusterUUID) {
+ final int numNodesInCluster = 3;
+ for (int i = 0; i < INDICES_CHANGE_NUM_TESTS; i++) {
+ final ClusterState previousState = createState(numNodesInCluster, randomBoolean(), initialIndices);
+ final int numAdd = randomIntBetween(0, 5); // add random # of indices to the next cluster state
+ final int numDel = randomIntBetween(0, initialIndices.size()); // delete random # of indices from the next cluster state
+ final List<String> addedIndices = addIndices(numAdd);
+ final List<String> delIndices = delIndices(numDel, initialIndices);
+ final ClusterState newState = nextState(previousState, changeClusterUUID, addedIndices, delIndices, 0);
+ final ClusterChangedEvent event = new ClusterChangedEvent("_na_", newState, previousState);
+ final List<String> addsFromEvent = event.indicesCreated();
+ final List<String> delsFromEvent = event.indicesDeleted();
+ Collections.sort(addsFromEvent);
+ Collections.sort(delsFromEvent);
+ assertThat(addsFromEvent, equalTo(addedIndices));
+ assertThat(delsFromEvent, changeClusterUUID ? equalTo(Collections.emptyList()) : equalTo(delIndices));
+ assertThat(event.metaDataChanged(), equalTo(changeClusterUUID || addedIndices.size() > 0 || delIndices.size() > 0));
+ }
+ }
+
+ private static ClusterState createSimpleClusterState() {
+ return ClusterState.builder(TEST_CLUSTER_NAME).build();
+ }
+
+ // Create a basic cluster state with a given set of indices
+ private static ClusterState createState(final int numNodes, final boolean isLocalMaster, final List<String> indices) {
+ final MetaData metaData = createMetaData(indices);
+ return ClusterState.builder(TEST_CLUSTER_NAME)
+ .nodes(createDiscoveryNodes(numNodes, isLocalMaster))
+ .metaData(metaData)
+ .routingTable(createRoutingTable(1, metaData))
+ .build();
+ }
+
+ // Create a modified cluster state from another one, but with some number of indices added and deleted.
+ private static ClusterState nextState(final ClusterState previousState, final boolean changeClusterUUID,
+ final List<String> addedIndices, final List<String> deletedIndices,
+ final int numNodesToRemove) {
+ final ClusterState.Builder builder = ClusterState.builder(previousState);
+ builder.stateUUID(Strings.randomBase64UUID());
+ final MetaData.Builder metaBuilder = MetaData.builder(previousState.metaData());
+ if (changeClusterUUID || addedIndices.size() > 0 || deletedIndices.size() > 0) {
+ // there is some change in metadata cluster state
+ if (changeClusterUUID) {
+ metaBuilder.clusterUUID(Strings.randomBase64UUID());
+ }
+ for (String index : addedIndices) {
+ metaBuilder.put(createIndexMetadata(index), true);
+ }
+ for (String index : deletedIndices) {
+ metaBuilder.remove(index);
+ }
+ builder.metaData(metaBuilder);
+ }
+ if (numNodesToRemove > 0) {
+ final int discoveryNodesSize = previousState.getNodes().size();
+ final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(previousState.getNodes());
+ for (int i = 0; i < numNodesToRemove && i < discoveryNodesSize; i++) {
+ nodesBuilder.remove(NODE_ID_PREFIX + i);
+ }
+ builder.nodes(nodesBuilder);
+ }
+ return builder.build();
+ }
+
+ // Create the discovery nodes for a cluster state. For our testing purposes, we want
+ // the first to be master, the second to be master eligible, the third to be a data node,
+ // and the remainder can be any kinds of nodes (master eligible, data, or both).
+ private static DiscoveryNodes createDiscoveryNodes(final int numNodes, final boolean isLocalMaster) {
+ assert (numNodes >= 3) : "the initial cluster state for event change tests should have a minimum of 3 nodes " +
+ "so there are a minimum of 2 master nodes for testing master change events.";
+ final DiscoveryNodes.Builder builder = DiscoveryNodes.builder();
+ final int localNodeIndex = isLocalMaster ? 0 : randomIntBetween(1, numNodes - 1); // randomly assign the local node if not master
+ for (int i = 0; i < numNodes; i++) {
+ final String nodeId = NODE_ID_PREFIX + i;
+ boolean isMasterEligible = false;
+ boolean isData = false;
+ if (i == 0) {
+ // the master node
+ builder.masterNodeId(nodeId);
+ isMasterEligible = true;
+ } else if (i == 1) {
+ // the alternate master node
+ isMasterEligible = true;
+ } else if (i == 2) {
+ // we need at least one data node
+ isData = true;
+ } else {
+ // remaining nodes can be anything (except for master)
+ isMasterEligible = randomBoolean();
+ isData = randomBoolean();
+ }
+ final DiscoveryNode node = newNode(nodeId, isMasterEligible, isData);
+ builder.put(node);
+ if (i == localNodeIndex) {
+ builder.localNodeId(nodeId);
+ }
+ }
+ return builder.build();
+ }
+
+ // Create a new DiscoveryNode
+ private static DiscoveryNode newNode(final String nodeId, boolean isMasterEligible, boolean isData) {
+ final Map<String, String> attributes = MapBuilder.<String, String>newMapBuilder()
+ .put(DiscoveryNode.MASTER_ATTR, isMasterEligible ? "true" : "false")
+ .put(DiscoveryNode.DATA_ATTR, isData ? "true" : "false")
+ .immutableMap();
+ return new DiscoveryNode(nodeId, nodeId, DummyTransportAddress.INSTANCE, attributes, Version.CURRENT);
+ }
+
+ // Create the metadata for a cluster state.
+ private static MetaData createMetaData(final List<String> indices) {
+ final MetaData.Builder builder = MetaData.builder();
+ builder.clusterUUID(INITIAL_CLUSTER_ID);
+ for (String index : indices) {
+ builder.put(createIndexMetadata(index), true);
+ }
+ return builder.build();
+ }
+
+ // Create the index metadata for a given index.
+ private static IndexMetaData createIndexMetadata(final String index) {
+ return createIndexMetadata(index, 1);
+ }
+
+ // Create the index metadata for a given index, with the specified version.
+ private static IndexMetaData createIndexMetadata(final String index, final long version) {
+ return IndexMetaData.builder(index)
+ .settings(settings)
+ .numberOfShards(1)
+ .numberOfReplicas(0)
+ .creationDate(System.currentTimeMillis())
+ .version(version)
+ .build();
+ }
+
+ // Create the routing table for a cluster state.
+ private static RoutingTable createRoutingTable(final long version, final MetaData metaData) {
+ final RoutingTable.Builder builder = RoutingTable.builder().version(version);
+ for (ObjectCursor<IndexMetaData> cursor : metaData.indices().values()) {
+ builder.addAsNew(cursor.value);
+ }
+ return builder.build();
+ }
+
+ // Create a list of indices to add
+ private static List<String> addIndices(final int numIndices) {
+ final List<String> list = new ArrayList<>();
+ for (int i = 0; i < numIndices; i++) {
+ list.add("newIdx_" + i);
+ }
+ return list;
+ }
+
+ // Create a list of indices to delete from a list that already belongs to a particular cluster state.
+ private static List<String> delIndices(final int numIndices, final List<String> currIndices) {
+ final List<String> list = new ArrayList<>();
+ for (int i = 0; i < numIndices; i++) {
+ list.add(currIndices.get(i));
+ }
+ return list;
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java
index 8c7da89fd8f..6caf0846344 100644
--- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java
+++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java
@@ -581,8 +581,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
// restore GC
masterNodeDisruption.stopDisrupting();
- ensureStableCluster(3, new TimeValue(DISRUPTION_HEALING_OVERHEAD.millis() + masterNodeDisruption.expectedTimeToHeal().millis()), false,
- oldNonMasterNodes.get(0));
+ ensureStableCluster(3, new TimeValue(DISRUPTION_HEALING_OVERHEAD.millis() + masterNodeDisruption.expectedTimeToHeal().millis()), false, oldNonMasterNodes.get(0));
// make sure all nodes agree on master
String newMaster = internalCluster().getMasterName();
@@ -1072,11 +1071,13 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
assertTrue(client().prepareGet("index", "doc", "1").get().isExists());
}
- // tests if indices are really deleted even if a master transition inbetween
- @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/11665")
+ /**
+ * Tests that indices are properly deleted even if there is a master transition in between.
+ * Test for https://github.com/elastic/elasticsearch/issues/11665
+ */
public void testIndicesDeleted() throws Exception {
configureUnicastCluster(3, null, 2);
- InternalTestCluster.Async<List<String>> masterNodes= internalCluster().startMasterOnlyNodesAsync(2);
+ InternalTestCluster.Async<List<String>> masterNodes = internalCluster().startMasterOnlyNodesAsync(2);
InternalTestCluster.Async<String> dataNode = internalCluster().startDataOnlyNodeAsync();
dataNode.get();
masterNodes.get();
diff --git a/core/src/test/java/org/elasticsearch/http/netty/HttpPublishPortIT.java b/core/src/test/java/org/elasticsearch/http/netty/HttpPublishPortIT.java
deleted file mode 100644
index b6cf9d91894..00000000000
--- a/core/src/test/java/org/elasticsearch/http/netty/HttpPublishPortIT.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.elasticsearch.http.netty;
-
-import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
-import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
-import org.elasticsearch.common.network.NetworkModule;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.transport.BoundTransportAddress;
-import org.elasticsearch.common.transport.InetSocketTransportAddress;
-import org.elasticsearch.http.HttpTransportSettings;
-import org.elasticsearch.test.ESIntegTestCase;
-import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
-import org.elasticsearch.test.ESIntegTestCase.Scope;
-
-import static org.hamcrest.CoreMatchers.is;
-import static org.hamcrest.Matchers.arrayWithSize;
-import static org.hamcrest.Matchers.greaterThanOrEqualTo;
-import static org.hamcrest.Matchers.instanceOf;
-
-@ClusterScope(scope = Scope.SUITE, numDataNodes = 1)
-public class HttpPublishPortIT extends ESIntegTestCase {
- @Override
- protected Settings nodeSettings(int nodeOrdinal) {
- return Settings.settingsBuilder()
- .put(super.nodeSettings(nodeOrdinal))
- .put(NetworkModule.HTTP_ENABLED.getKey(), true)
- .put(HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT.getKey(), 9080)
- .build();
- }
-
- public void testHttpPublishPort() throws Exception {
- NodesInfoResponse response = client().admin().cluster().prepareNodesInfo().clear().setHttp(true).get();
- assertThat(response.getNodes(), arrayWithSize(greaterThanOrEqualTo(1)));
- NodeInfo nodeInfo = response.getNodes()[0];
-
- BoundTransportAddress address = nodeInfo.getHttp().address();
- assertThat(address.publishAddress(), instanceOf(InetSocketTransportAddress.class));
-
- InetSocketTransportAddress publishAddress = (InetSocketTransportAddress) address.publishAddress();
- assertThat(publishAddress.address().getPort(), is(9080));
- }
-}
diff --git a/core/src/test/java/org/elasticsearch/http/netty/NettyHttpPublishPortTests.java b/core/src/test/java/org/elasticsearch/http/netty/NettyHttpPublishPortTests.java
new file mode 100644
index 00000000000..c6e2c93463f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/http/netty/NettyHttpPublishPortTests.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.http.netty;
+
+import org.elasticsearch.common.network.NetworkUtils;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.http.BindHttpException;
+import org.elasticsearch.http.HttpTransportSettings;
+import org.elasticsearch.test.ESTestCase;
+
+import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static java.net.InetAddress.getByName;
+import static java.util.Arrays.asList;
+import static org.elasticsearch.http.netty.NettyHttpServerTransport.resolvePublishPort;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+
+public class NettyHttpPublishPortTests extends ESTestCase {
+
+ public void testHttpPublishPort() throws Exception {
+ int boundPort = randomIntBetween(9000, 9100);
+ int otherBoundPort = randomIntBetween(9200, 9300);
+
+ int publishPort = resolvePublishPort(Settings.builder().put(HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT.getKey(), 9080).build(),
+ randomAddresses(), getByName("127.0.0.2"));
+ assertThat("Publish port should be explicitly set to 9080", publishPort, equalTo(9080));
+
+ publishPort = resolvePublishPort(Settings.EMPTY, asList(address("127.0.0.1", boundPort), address("127.0.0.2", otherBoundPort)),
+ getByName("127.0.0.1"));
+ assertThat("Publish port should be derived from matched address", publishPort, equalTo(boundPort));
+
+ publishPort = resolvePublishPort(Settings.EMPTY, asList(address("127.0.0.1", boundPort), address("127.0.0.2", boundPort)),
+ getByName("127.0.0.3"));
+ assertThat("Publish port should be derived from unique port of bound addresses", publishPort, equalTo(boundPort));
+
+ try {
+ resolvePublishPort(Settings.EMPTY, asList(address("127.0.0.1", boundPort), address("127.0.0.2", otherBoundPort)),
+ getByName("127.0.0.3"));
+ fail("Expected BindHttpException as publish_port not specified and non-unique port of bound addresses");
+ } catch (BindHttpException e) {
+ assertThat(e.getMessage(), containsString("Failed to auto-resolve http publish port"));
+ }
+
+ publishPort = resolvePublishPort(Settings.EMPTY, asList(address("0.0.0.0", boundPort), address("127.0.0.2", otherBoundPort)),
+ getByName("127.0.0.1"));
+ assertThat("Publish port should be derived from matching wildcard address", publishPort, equalTo(boundPort));
+
+ if (NetworkUtils.SUPPORTS_V6) {
+ publishPort = resolvePublishPort(Settings.EMPTY, asList(address("0.0.0.0", boundPort), address("127.0.0.2", otherBoundPort)),
+ getByName("::1"));
+ assertThat("Publish port should be derived from matching wildcard address", publishPort, equalTo(boundPort));
+ }
+ }
+
+ private InetSocketTransportAddress address(String host, int port) throws UnknownHostException {
+ return new InetSocketTransportAddress(getByName(host), port);
+ }
+
+ private InetSocketTransportAddress randomAddress() throws UnknownHostException {
+ return address("127.0.0." + randomIntBetween(1, 100), randomIntBetween(9200, 9300));
+ }
+
+ private List<InetSocketTransportAddress> randomAddresses() throws UnknownHostException {
+ List<InetSocketTransportAddress> addresses = new ArrayList<>();
+ for (int i = 0; i < randomIntBetween(1, 5); i++) {
+ addresses.add(randomAddress());
+ }
+ return addresses;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/ingest/IngestMetadataTests.java b/core/src/test/java/org/elasticsearch/ingest/IngestMetadataTests.java
index a6cf12389a0..c6477e355a1 100644
--- a/core/src/test/java/org/elasticsearch/ingest/IngestMetadataTests.java
+++ b/core/src/test/java/org/elasticsearch/ingest/IngestMetadataTests.java
@@ -19,8 +19,10 @@
package org.elasticsearch.ingest;
+import org.elasticsearch.cluster.DiffableUtils;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
@@ -32,6 +34,10 @@ import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.not;
+
public class IngestMetadataTests extends ESTestCase {
public void testFromXContent() throws IOException {
@@ -61,4 +67,63 @@ public class IngestMetadataTests extends ESTestCase {
assertEquals(pipeline.getConfigAsMap(), m.getPipelines().get("1").getConfigAsMap());
assertEquals(pipeline2.getConfigAsMap(), m.getPipelines().get("2").getConfigAsMap());
}
+
+ public void testDiff() throws Exception {
+ BytesReference pipelineConfig = new BytesArray("{}");
+
+ Map<String, PipelineConfiguration> pipelines = new HashMap<>();
+ pipelines.put("1", new PipelineConfiguration("1", pipelineConfig));
+ pipelines.put("2", new PipelineConfiguration("2", pipelineConfig));
+ IngestMetadata ingestMetadata1 = new IngestMetadata(pipelines);
+
+ pipelines = new HashMap<>();
+ pipelines.put("1", new PipelineConfiguration("1", pipelineConfig));
+ pipelines.put("3", new PipelineConfiguration("3", pipelineConfig));
+ pipelines.put("4", new PipelineConfiguration("4", pipelineConfig));
+ IngestMetadata ingestMetadata2 = new IngestMetadata(pipelines);
+
+ IngestMetadata.IngestMetadataDiff diff = (IngestMetadata.IngestMetadataDiff) ingestMetadata2.diff(ingestMetadata1);
+ assertThat(((DiffableUtils.MapDiff)diff.pipelines).getDeletes().size(), equalTo(1));
+ assertThat(((DiffableUtils.MapDiff)diff.pipelines).getDeletes().get(0), equalTo("2"));
+ assertThat(((DiffableUtils.MapDiff)diff.pipelines).getUpserts().size(), equalTo(2));
+ assertThat(((DiffableUtils.MapDiff)diff.pipelines).getUpserts().containsKey("3"), is(true));
+ assertThat(((DiffableUtils.MapDiff)diff.pipelines).getUpserts().containsKey("4"), is(true));
+
+ IngestMetadata endResult = (IngestMetadata) diff.apply(ingestMetadata2);
+ assertThat(endResult, not(equalTo(ingestMetadata1)));
+ assertThat(endResult.getPipelines().size(), equalTo(3));
+ assertThat(endResult.getPipelines().get("1"), equalTo(new PipelineConfiguration("1", pipelineConfig)));
+ assertThat(endResult.getPipelines().get("3"), equalTo(new PipelineConfiguration("3", pipelineConfig)));
+ assertThat(endResult.getPipelines().get("4"), equalTo(new PipelineConfiguration("4", pipelineConfig)));
+
+ pipelines = new HashMap<>();
+ pipelines.put("1", new PipelineConfiguration("1", new BytesArray("{}")));
+ pipelines.put("2", new PipelineConfiguration("2", new BytesArray("{}")));
+ IngestMetadata ingestMetadata3 = new IngestMetadata(pipelines);
+
+ diff = (IngestMetadata.IngestMetadataDiff) ingestMetadata3.diff(ingestMetadata1);
+ assertThat(((DiffableUtils.MapDiff)diff.pipelines).getDeletes().size(), equalTo(0));
+ assertThat(((DiffableUtils.MapDiff)diff.pipelines).getUpserts().size(), equalTo(0));
+
+ endResult = (IngestMetadata) diff.apply(ingestMetadata3);
+ assertThat(endResult, equalTo(ingestMetadata1));
+ assertThat(endResult.getPipelines().size(), equalTo(2));
+ assertThat(endResult.getPipelines().get("1"), equalTo(new PipelineConfiguration("1", pipelineConfig)));
+ assertThat(endResult.getPipelines().get("2"), equalTo(new PipelineConfiguration("2", pipelineConfig)));
+
+ pipelines = new HashMap<>();
+ pipelines.put("1", new PipelineConfiguration("1", new BytesArray("{}")));
+ pipelines.put("2", new PipelineConfiguration("2", new BytesArray("{\"key\" : \"value\"}")));
+ IngestMetadata ingestMetadata4 = new IngestMetadata(pipelines);
+
+ diff = (IngestMetadata.IngestMetadataDiff) ingestMetadata4.diff(ingestMetadata1);
+ assertThat(((DiffableUtils.MapDiff)diff.pipelines).getDiffs().size(), equalTo(1));
+ assertThat(((DiffableUtils.MapDiff)diff.pipelines).getDiffs().containsKey("2"), is(true));
+
+ endResult = (IngestMetadata) diff.apply(ingestMetadata4);
+ assertThat(endResult, not(equalTo(ingestMetadata1)));
+ assertThat(endResult.getPipelines().size(), equalTo(2));
+ assertThat(endResult.getPipelines().get("1"), equalTo(new PipelineConfiguration("1", pipelineConfig)));
+ assertThat(endResult.getPipelines().get("2"), equalTo(new PipelineConfiguration("2", new BytesArray("{\"key\" : \"value\"}"))));
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/transport/netty/NettyPublishPortTests.java b/core/src/test/java/org/elasticsearch/transport/netty/NettyPublishPortTests.java
new file mode 100644
index 00000000000..6f602dafc99
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/transport/netty/NettyPublishPortTests.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport.netty;
+
+import org.elasticsearch.common.network.NetworkUtils;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.transport.BindTransportException;
+import org.elasticsearch.transport.TransportSettings;
+
+import java.net.InetSocketAddress;
+import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static java.net.InetAddress.getByName;
+import static java.util.Arrays.asList;
+import static org.elasticsearch.transport.netty.NettyTransport.resolvePublishPort;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+
+public class NettyPublishPortTests extends ESTestCase {
+
+ public void testPublishPort() throws Exception {
+ int boundPort = randomIntBetween(9000, 9100);
+ int otherBoundPort = randomIntBetween(9200, 9300);
+
+ boolean useProfile = randomBoolean();
+ final String profile;
+ final Settings settings;
+ final Settings profileSettings;
+ if (useProfile) {
+ profile = "some_profile";
+ settings = randomBoolean() ? Settings.EMPTY : Settings.builder().put(TransportSettings.PUBLISH_PORT.getKey(), 9081).build();
+ profileSettings = Settings.builder().put("publish_port", 9080).build();
+ } else {
+ profile = TransportSettings.DEFAULT_PROFILE;
+ settings = Settings.builder().put(TransportSettings.PUBLISH_PORT.getKey(), 9081).build();
+ profileSettings = randomBoolean() ? Settings.EMPTY : Settings.builder().put("publish_port", 9080).build();
+ }
+
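+ // The assertion below encodes the expected precedence: with a custom profile the
+ // profile-level publish_port (9080) should win; for the default profile the global
+ // TransportSettings.PUBLISH_PORT setting (9081) applies.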
+ int publishPort = resolvePublishPort(profile, settings, profileSettings,
+ randomAddresses(), getByName("127.0.0.2"));
+ assertThat("Publish port should be explicitly set", publishPort, equalTo(useProfile ? 9080 : 9081));
+
+ publishPort = resolvePublishPort(profile, Settings.EMPTY, Settings.EMPTY,
+ asList(address("127.0.0.1", boundPort), address("127.0.0.2", otherBoundPort)),
+ getByName("127.0.0.1"));
+ assertThat("Publish port should be derived from matched address", publishPort, equalTo(boundPort));
+
+ publishPort = resolvePublishPort(profile, Settings.EMPTY, Settings.EMPTY,
+ asList(address("127.0.0.1", boundPort), address("127.0.0.2", boundPort)),
+ getByName("127.0.0.3"));
+ assertThat("Publish port should be derived from unique port of bound addresses", publishPort, equalTo(boundPort));
+
+ try {
+ resolvePublishPort(profile, Settings.EMPTY, Settings.EMPTY,
+ asList(address("127.0.0.1", boundPort), address("127.0.0.2", otherBoundPort)),
+ getByName("127.0.0.3"));
+ fail("Expected BindTransportException as publish_port not specified and non-unique port of bound addresses");
+ } catch (BindTransportException e) {
+ assertThat(e.getMessage(), containsString("Failed to auto-resolve publish port"));
+ }
+
+ publishPort = resolvePublishPort(profile, Settings.EMPTY, Settings.EMPTY,
+ asList(address("0.0.0.0", boundPort), address("127.0.0.2", otherBoundPort)),
+ getByName("127.0.0.1"));
+ assertThat("Publish port should be derived from matching wildcard address", publishPort, equalTo(boundPort));
+
+ if (NetworkUtils.SUPPORTS_V6) {
+ publishPort = resolvePublishPort(profile, Settings.EMPTY, Settings.EMPTY,
+ asList(address("0.0.0.0", boundPort), address("127.0.0.2", otherBoundPort)),
+ getByName("::1"));
+ assertThat("Publish port should be derived from matching wildcard address", publishPort, equalTo(boundPort));
+ }
+ }
+
+ private InetSocketAddress address(String host, int port) throws UnknownHostException {
+ return new InetSocketAddress(getByName(host), port);
+ }
+
+ private InetSocketAddress randomAddress() throws UnknownHostException {
+ return address("127.0.0." + randomIntBetween(1, 100), randomIntBetween(9200, 9300));
+ }
+
+ private List<InetSocketAddress> randomAddresses() throws UnknownHostException {
+ List<InetSocketAddress> addresses = new ArrayList<>();
+ for (int i = 0; i < randomIntBetween(1, 5); i++) {
+ addresses.add(randomAddress());
+ }
+ return addresses;
+ }
+}
diff --git a/docs/reference/cat/count.asciidoc b/docs/reference/cat/count.asciidoc
index f264f825a48..ff64f2bc73b 100644
--- a/docs/reference/cat/count.asciidoc
+++ b/docs/reference/cat/count.asciidoc
@@ -9,8 +9,12 @@ cluster, or individual indices.
% curl 192.168.56.10:9200/_cat/indices
green wiki1 3 0 10000 331 168.5mb 168.5mb
green wiki2 3 0 428 0 8mb 8mb
+
% curl 192.168.56.10:9200/_cat/count
1384314124582 19:42:04 10428
+
% curl 192.168.56.10:9200/_cat/count/wiki2
1384314139815 19:42:19 428
--------------------------------------------------
+
+NOTE: The document count indicates the number of live documents and does not include deleted documents which have not yet been cleaned up by the merge process.
diff --git a/docs/reference/cat/nodes.asciidoc b/docs/reference/cat/nodes.asciidoc
index 1690baa55f3..90ff91513aa 100644
--- a/docs/reference/cat/nodes.asciidoc
+++ b/docs/reference/cat/nodes.asciidoc
@@ -85,6 +85,7 @@ k0zy 192.168.56.10 9300 {version} m
|`pid` |`p` |No |Process ID |13061
|`ip` |`i` |Yes |IP address |127.0.1.1
|`port` |`po` |No |Bound transport port |9300
+|`http_address` |`http` |No |Bound http address |127.0.0.1:9200
|`version` |`v` |No |Elasticsearch version |{version}
|`build` |`b` |No |Elasticsearch Build hash |5c03844
|`jdk` |`j` |No |Running Java version |1.8.0
diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc
new file mode 100644
index 00000000000..541e05dedc3
--- /dev/null
+++ b/docs/reference/docs/reindex.asciidoc
@@ -0,0 +1,461 @@
+[[docs-reindex]]
+==== Reindex API
+
+`_reindex`'s most basic form just copies documents from one index to another.
+This will copy documents from `twitter` into `new_twitter`:
+
+[source,js]
+--------------------------------------------------
+POST /_reindex
+{
+ "source": {
+ "index": "twitter"
+ },
+ "dest": {
+ "index": "new_twitter"
+ }
+}
+--------------------------------------------------
+// AUTOSENSE
+
+That will return something like this:
+
+[source,js]
+--------------------------------------------------
+{
+ "took" : 639,
+ "updated": 112,
+ "batches": 130,
+ "version_conflicts": 0,
+ "failures" : [ ],
+ "created": 12344
+}
+--------------------------------------------------
+
+Just like `_update_by_query`, `_reindex` gets a snapshot of the source index
+but its target must be a **different** index so version conflicts are unlikely.
+The `dest` element can be configured like the index API to control optimistic
+concurrency control. Just leaving out `version_type` (as above) or setting it
+to `internal` will cause Elasticsearch to blindly dump documents into the
+target, overwriting any that happen to have the same type and id:
+
+[source,js]
+--------------------------------------------------
+POST /_reindex
+{
+ "source": {
+ "index": "twitter"
+ },
+ "dest": {
+ "index": "new_twitter",
+ "version_type": "internal"
+ }
+}
+--------------------------------------------------
+// AUTOSENSE
+
+Setting `version_type` to `external` will cause Elasticsearch to preserve the
+`version` from the source, create any documents that are missing, and update
+any documents that have an older version in the destination index than they do
+in the source index:
+
+[source,js]
+--------------------------------------------------
+POST /_reindex
+{
+ "source": {
+ "index": "twitter"
+ },
+ "dest": {
+ "index": "new_twitter",
+ "version_type": "external"
+ }
+}
+--------------------------------------------------
+// AUTOSENSE
+
+Setting `op_type` to `create` will cause `_reindex` to only create missing
+documents in the target index. All existing documents will cause a version
+conflict:
+
+[source,js]
+--------------------------------------------------
+POST /_reindex
+{
+ "source": {
+ "index": "twitter"
+ },
+ "dest": {
+ "index": "new_twitter",
+ "op_type": "create"
+ }
+}
+--------------------------------------------------
+// AUTOSENSE
+
+By default version conflicts abort the `_reindex` process but you can just
+count them by setting `"conflicts": "proceed"` in the request body:
+
+[source,js]
+--------------------------------------------------
+POST /_reindex
+{
+ "conflicts": "proceed",
+ "source": {
+ "index": "twitter"
+ },
+ "dest": {
+ "index": "new_twitter",
+ "op_type": "create"
+ }
+}
+--------------------------------------------------
+// AUTOSENSE
+
+You can limit the documents by adding a type to the `source` or by adding a
+query. This will only copy `tweet`s made by `kimchy` into `new_twitter`:
+
+[source,js]
+--------------------------------------------------
+POST /_reindex
+{
+ "source": {
+ "index": "twitter",
+ "type": "tweet",
+ "query": {
+ "term": {
+ "user": "kimchy"
+ }
+ }
+ },
+ "dest": {
+ "index": "new_twitter"
+ }
+}
+--------------------------------------------------
+// AUTOSENSE
+
+`index` and `type` in `source` can both be lists, allowing you to copy from
+lots of sources in one request. This will copy documents from the `tweet` and
+`post` types in the `twitter` and `blog` indices. It'd include the `post` type in
+the `twitter` index and the `tweet` type in the `blog` index. If you want to be
+more specific you'll need to use the `query`. It also makes no effort to handle
+id collisions. The target index will remain valid but it's not easy to predict
+which document will survive because the iteration order isn't well defined.
+Just avoid that situation, ok?
+
+[source,js]
+--------------------------------------------------
+POST /_reindex
+{
+ "source": {
+ "index": ["twitter", "blog"],
+ "type": ["tweet", "post"]
+ },
+ "index": {
+ "index": "all_together"
+ }
+}
+--------------------------------------------------
+// AUTOSENSE
+
+It's also possible to limit the number of processed documents by setting
+`size`. This will only copy a single document from `twitter` to
+`new_twitter`:
+
+[source,js]
+--------------------------------------------------
+POST /_reindex
+{
+ "size": 1,
+ "source": {
+ "index": "twitter"
+ },
+ "dest": {
+ "index": "new_twitter"
+ }
+}
+--------------------------------------------------
+// AUTOSENSE
+
+If you want a particular set of documents from the twitter index you'll
+need to sort. Sorting makes the scroll less efficient but in some contexts
+it's worth it. If possible, prefer a more selective query to `size` and `sort`.
+This will copy 10000 documents from `twitter` into `new_twitter`:
+
+[source,js]
+--------------------------------------------------
+POST /_reindex
+{
+ "size": 10000,
+ "source": {
+ "index": "twitter",
+ "sort": { "date": "desc" }
+ },
+ "dest": {
+ "index": "new_twitter"
+ }
+}
+--------------------------------------------------
+// AUTOSENSE
+
+Like `_update_by_query`, `_reindex` supports a script that modifies the
+document. Unlike `_update_by_query`, the script is allowed to modify the
+document's metadata. This example bumps the version of the source document:
+
+[source,js]
+--------------------------------------------------
+POST /_reindex
+{
+ "source": {
+ "index": "twitter",
+ },
+ "dest": {
+ "index": "new_twitter",
+ "version_type": "external"
+ },
+ "script": {
+ "inline": "if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}"
+ }
+}
+--------------------------------------------------
+// AUTOSENSE
+
+Think of the possibilities! Just be careful! With great power.... You can
+change:
+ * "_id"
+ * "_type"
+ * "_index"
+ * "_version"
+ * "_routing"
+ * "_parent"
+ * "_timestamp"
+ * "_ttl"
+
+Setting `_version` to `null` or clearing it from the `ctx` map is just like not
+sending the version in an indexing request. It will cause that document to be
+overwritten in the target index regardless of the version on the target or the
+version type you use in the `_reindex` request.
+
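+For example, this sketch clears the version in the script, so every copied
+document will overwrite its counterpart in `new_twitter` even though
+`version_type` is set to `external`:
+
+[source,js]
+--------------------------------------------------
+POST /_reindex
+{
+  "source": {
+    "index": "twitter"
+  },
+  "dest": {
+    "index": "new_twitter",
+    "version_type": "external"
+  },
+  "script": {
+    "inline": "ctx._version = null"
+  }
+}
+--------------------------------------------------
+// AUTOSENSE
+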
+By default if `_reindex` sees a document with routing then the routing is
+preserved unless it's changed by the script. You can set `routing` on the
+`dest` request to change this:
+
+`keep`::
+
+Sets the routing on the bulk request sent for each match to the routing on
+the match. The default.
+
+`discard`::
+
+Sets the routing on the bulk request sent for each match to null.
+
+`=`::
+
+Sets the routing on the bulk request sent for each match to all text after
+the `=`.
+
+For example, you can use the following request to copy all documents from
+the `source` index with the company name `cat` into the `dest` index with
+routing set to `cat`.
+
+[source,js]
+--------------------------------------------------
+POST /_reindex
+{
+ "source": {
+ "index": "source"
+ "query": {
+ "match": {
+ "company": "cat"
+ }
+ }
+ },
+ "dest": {
+ "index": "dest",
+ "routing": "=cat"
+ }
+}
+--------------------------------------------------
+// AUTOSENSE
+
+
+[float]
+=== URL Parameters
+
+In addition to the standard parameters like `pretty`, the Reindex API also
+supports `refresh`, `wait_for_completion`, `consistency`, and `timeout`.
+
+Sending the `refresh` URL parameter will cause all indices to which the request
+wrote to be refreshed. This is different from the Index API's `refresh`
+parameter, which causes just the shard that received the new data to be
+refreshed.
+
+If the request contains `wait_for_completion=false` then Elasticsearch will
+perform some preflight checks, launch the request, and then return a `task`
+which can be used with <<docs-reindex-task-api,the Task API>> to cancel or get
+the status of the task. For now, once the request is finished the task is gone
+and the only place to look for the ultimate result of the task is in the
+Elasticsearch log file. This will be fixed soon.
+
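+For example, this sketch launches the reindex from the first example in the
+background and returns the task immediately:
+
+[source,js]
+--------------------------------------------------
+POST /_reindex?wait_for_completion=false
+{
+  "source": {
+    "index": "twitter"
+  },
+  "dest": {
+    "index": "new_twitter"
+  }
+}
+--------------------------------------------------
+// AUTOSENSE
+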
+`consistency` controls how many copies of a shard must respond to each write
+request. `timeout` controls how long each write request waits for unavailable
+shards to become available. Both work exactly how they work in the
+{ref}/docs-bulk.html[Bulk API].
+
+[float]
+=== Response body
+
+The JSON response looks like this:
+
+[source,js]
+--------------------------------------------------
+{
+ "took" : 639,
+ "updated": 0,
+ "created": 123,
+ "batches": 1,
+ "version_conflicts": 2,
+ "failures" : [ ]
+}
+--------------------------------------------------
+
+`took`::
+
+The number of milliseconds from start to end of the whole operation.
+
+`updated`::
+
+The number of documents that were successfully updated.
+
+`created`::
+
+The number of documents that were successfully created.
+
+`batches`::
+
+The number of scroll responses pulled back by the reindex.
+
+`version_conflicts`::
+
+The number of version conflicts that reindex hit.
+
+`failures`::
+
+Array of all indexing failures. If this is non-empty then the request aborted
+because of those failures. See `conflicts` for how to prevent version conflicts
+from aborting the operation.
+
+[float]
+[[docs-reindex-task-api]]
+=== Works with the Task API
+
+While Reindex is running you can fetch its status using the
+{ref}/task/list.html[Task List APIs]:
+
+[source,js]
+--------------------------------------------------
+POST /_tasks/?pretty&detailed=true&actions=*reindex
+--------------------------------------------------
+// AUTOSENSE
+
+The response looks like:
+
+[source,js]
+--------------------------------------------------
+{
+ "nodes" : {
+ "r1A2WoRbTwKZ516z6NEs5A" : {
+ "name" : "Tyrannus",
+ "transport_address" : "127.0.0.1:9300",
+ "host" : "127.0.0.1",
+ "ip" : "127.0.0.1:9300",
+ "attributes" : {
+ "testattr" : "test",
+ "portsfile" : "true"
+ },
+ "tasks" : [ {
+ "node" : "r1A2WoRbTwKZ516z6NEs5A",
+ "id" : 36619,
+ "type" : "transport",
+ "action" : "indices:data/write/reindex",
+ "status" : { <1>
+ "total" : 6154,
+ "updated" : 3500,
+ "created" : 0,
+ "deleted" : 0,
+ "batches" : 36,
+ "version_conflicts" : 0,
+ "noops" : 0
+ },
+ "description" : ""
+ } ]
+ }
+ }
+}
+--------------------------------------------------
+
+<1> This object contains the actual status. It is just like the response JSON
+with the important addition of the `total` field. `total` is the total number
+of operations that the reindex expects to perform. You can estimate the
+progress by adding the `updated`, `created`, and `deleted` fields. The request
+will finish when their sum is equal to the `total` field.
+
+
+[float]
+=== Examples
+
+==== Change the name of a field
+
+`_reindex` can be used to build a copy of an index with renamed fields. Say you
+create an index containing documents that look like this:
+
+[source,js]
+--------------------------------------------------
+POST test/test/1?refresh&pretty
+{
+ "text": "words words",
+ "flag": "foo"
+}
+--------------------------------------------------
+// AUTOSENSE
+
+But you don't like the name `flag` and want to replace it with `tag`.
+`_reindex` can create the other index for you:
+
+[source,js]
+--------------------------------------------------
+POST _reindex?pretty
+{
+ "source": {
+ "index": "test"
+ },
+ "dest": {
+ "index": "test2"
+ },
+ "script": {
+ "inline": "ctx._source.tag = ctx._source.remove(\"flag\")"
+ }
+}
+--------------------------------------------------
+// AUTOSENSE
+
+Now you can get the new document:
+
+[source,js]
+--------------------------------------------------
+GET test2/test/1?pretty
+--------------------------------------------------
+// AUTOSENSE
+
+and it'll look like:
+
+[source,js]
+--------------------------------------------------
+{
+ "text": "words words",
+ "tag": "foo"
+}
+--------------------------------------------------
+
+Or you can search by `tag` or whatever you want.
diff --git a/docs/reference/docs/update-by-query.asciidoc b/docs/reference/docs/update-by-query.asciidoc
new file mode 100644
index 00000000000..298c0a9ab79
--- /dev/null
+++ b/docs/reference/docs/update-by-query.asciidoc
@@ -0,0 +1,358 @@
+[[docs-update-by-query]]
+==== Update By Query API
+
+The simplest usage of `_update_by_query` just performs an update on every
+document in the index without changing the source. This is useful to
+<<picking-up-a-new-property,pick up a new property>> or some other online
+mapping change. Here is the API:
+
+[source,js]
+--------------------------------------------------
+POST /twitter/_update_by_query?conflicts=proceed
+--------------------------------------------------
+// AUTOSENSE
+
+That will return something like this:
+
+[source,js]
+--------------------------------------------------
+{
+ "took" : 639,
+ "updated": 1235,
+ "batches": 13,
+ "version_conflicts": 2,
+ "failures" : [ ]
+}
+--------------------------------------------------
+
+`_update_by_query` gets a snapshot of the index when it starts and indexes what
+it finds using `internal` versioning. That means that you'll get a version
+conflict if the document changes between the time when the snapshot was taken
+and when the index request is processed. When the versions match the document
+is updated and the version number is incremented.
+
+All update and query failures cause the `_update_by_query` to abort and are
+returned in the `failures` of the response. The updates that have been
+performed still stick. In other words, the process is not rolled back, only
+aborted. While the first failure causes the abort all failures that are
+returned by the failing bulk request are returned in the `failures` element so
+it's possible for there to be quite a few.
+
+If you want to simply count version conflicts, and not cause the
+`_update_by_query` to abort, you can set `conflicts=proceed` on the URL or
+`"conflicts": "proceed"` in the request body. The first example does this
+because it is just trying to
+pick up an online mapping change and a version conflict simply means that the
+conflicting document was updated between the start of the `_update_by_query`
+and the time when it attempted to update the document. This is fine because
+that update will have picked up the online mapping update.
+
+Back to the API format, you can limit `_update_by_query` to a single type. This
+will only update `tweet`s from the `twitter` index:
+
+[source,js]
+--------------------------------------------------
+POST /twitter/tweet/_update_by_query?conflicts=proceed
+--------------------------------------------------
+// AUTOSENSE
+
+You can also limit `_update_by_query` using the
+{ref}/query-dsl.html[Query DSL]. This will update all documents from the
+`twitter` index for the user `kimchy`:
+
+[source,js]
+--------------------------------------------------
+POST /twitter/_update_by_query?conflicts=proceed
+{
+ "query": { <1>
+ "term": {
+ "user": "kimchy"
+ }
+ }
+}
+--------------------------------------------------
+// AUTOSENSE
+
+<1> The query must be passed as a value to the `query` key, in the same
+way as the {ref}/search-search.html[Search API]. You can also use the `q`
+parameter in the same way as the search API.
+
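+For example, the same query can be expressed with the `q` parameter (a
+sketch):
+
+[source,js]
+--------------------------------------------------
+POST /twitter/_update_by_query?q=user:kimchy&conflicts=proceed
+--------------------------------------------------
+// AUTOSENSE
+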
+So far we've only been updating documents without changing their source. That
+is genuinely useful for things like
+<<picking-up-a-new-property,picking up a new property>> but it's only half the
+fun. `_update_by_query` supports a `script` object to update the document. This
+will increment the `likes` field on all of kimchy's tweets:
+
+[source,js]
+--------------------------------------------------
+POST /twitter/_update_by_query
+{
+ "script": {
+ "inline": "ctx._source.likes++"
+ },
+ "query": {
+ "term": {
+ "user": "kimchy"
+ }
+ }
+}
+--------------------------------------------------
+// AUTOSENSE
+
+Just as in the {ref}/docs-update.html[Update API] you can set `ctx.op = "noop"`
+if your script decides that it doesn't have to make any changes. That will
+cause `_update_by_query` to omit that document from its updates. Setting
+`ctx.op` to anything else is an error. If you want to delete by a query you
+can use the Delete By Query plugin instead. Setting any other field in `ctx`
+is an error.
+
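+For example, this sketch skips documents whose `likes` field is already at
+least 100; the field and threshold are illustrative:
+
+[source,js]
+--------------------------------------------------
+POST /twitter/_update_by_query
+{
+  "script": {
+    "inline": "if (ctx._source.likes >= 100) {ctx.op = 'noop'} else {ctx._source.likes = 100}"
+  }
+}
+--------------------------------------------------
+// AUTOSENSE
+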
+Note that we stopped specifying `conflicts=proceed`. In this case we want a
+version conflict to abort the process so we can handle the failure.
+
+This API doesn't allow you to move the documents it touches, just modify their
+source. This is intentional! We've made no provisions for removing the document
+from its original location.
+
+It's also possible to do this whole thing on multiple indexes and multiple
+types at once, just like the search API:
+
+[source,js]
+--------------------------------------------------
+POST /twitter,blog/tweet,post/_update_by_query
+--------------------------------------------------
+// AUTOSENSE
+
+If you provide `routing` then the routing is copied to the scroll query,
+limiting the process to the shards that match that routing value:
+
+[source,js]
+--------------------------------------------------
+POST /twitter/_update_by_query?routing=1
+--------------------------------------------------
+// AUTOSENSE
+
+By default `_update_by_query` uses scroll batches of 100. You can change the
+batch size with the `scroll_size` URL parameter:
+
+[source,js]
+--------------------------------------------------
+POST /twitter/_update_by_query?scroll_size=1000
+--------------------------------------------------
+// AUTOSENSE
+
+[float]
+=== URL Parameters
+
+In addition to the standard parameters like `pretty`, the Update By Query API
+also supports `refresh`, `wait_for_completion`, `consistency`, and `timeout`.
+
+Sending the `refresh` parameter will refresh all shards in the index being
+updated when the request completes. This is different from the Index API's
+`refresh` parameter, which causes just the shard that received the new data to
+be refreshed.
+
+If the request contains `wait_for_completion=false` then Elasticsearch will
+perform some preflight checks, launch the request, and then return a `task`
+which can be used with <<docs-update-by-query-task-api,the Task API>> to cancel
+or get the status of the task. For now, once the request is finished the task
+is gone and the only place to look for the ultimate result of the task is in
+the Elasticsearch log file. This will be fixed soon.
+
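+For example, this sketch launches the update in the background and returns a
+task you can poll:
+
+[source,js]
+--------------------------------------------------
+POST /twitter/_update_by_query?wait_for_completion=false&conflicts=proceed
+--------------------------------------------------
+// AUTOSENSE
+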
+`consistency` controls how many copies of a shard must respond to each write
+request. `timeout` controls how long each write request waits for unavailable
+shards to become available. Both work exactly how they work in the
+{ref}/docs-bulk.html[Bulk API].
+
+[float]
+=== Response body
+
+The JSON response looks like this:
+
+[source,js]
+--------------------------------------------------
+{
+ "took" : 639,
+ "updated": 0,
+ "batches": 1,
+ "version_conflicts": 2,
+ "failures" : [ ]
+}
+--------------------------------------------------
+
+`took`::
+
+The number of milliseconds from start to end of the whole operation.
+
+`updated`::
+
+The number of documents that were successfully updated.
+
+`batches`::
+
+The number of scroll responses pulled back by the update by query.
+
+`version_conflicts`::
+
+The number of version conflicts that the update by query hit.
+
+`failures`::
+
+Array of all indexing failures. If this is non-empty then the request aborted
+because of those failures. See `conflicts` for how to prevent version conflicts
+from aborting the operation.
+
+
+[float]
+[[docs-update-by-query-task-api]]
+=== Works with the Task API
+
+While Update By Query is running you can fetch its status using the
+{ref}/task/list.html[Task List APIs]:
+
+[source,js]
+--------------------------------------------------
+POST /_tasks/?pretty&detailed=true&actions=*byquery
+--------------------------------------------------
+// AUTOSENSE
+
+The response looks like:
+
+[source,js]
+--------------------------------------------------
+{
+ "nodes" : {
+ "r1A2WoRbTwKZ516z6NEs5A" : {
+ "name" : "Tyrannus",
+ "transport_address" : "127.0.0.1:9300",
+ "host" : "127.0.0.1",
+ "ip" : "127.0.0.1:9300",
+ "attributes" : {
+ "testattr" : "test",
+ "portsfile" : "true"
+ },
+ "tasks" : [ {
+ "node" : "r1A2WoRbTwKZ516z6NEs5A",
+ "id" : 36619,
+ "type" : "transport",
+ "action" : "indices:data/write/update/byquery",
+ "status" : { <1>
+ "total" : 6154,
+ "updated" : 3500,
+ "created" : 0,
+ "deleted" : 0,
+ "batches" : 36,
+ "version_conflicts" : 0,
+ "noops" : 0
+ },
+ "description" : ""
+ } ]
+ }
+ }
+}
+--------------------------------------------------
+
+<1> This object contains the actual status. It is just like the response JSON
+with the important addition of the `total` field. `total` is the total number
+of operations that the update by query expects to perform. You can estimate the
+progress by adding the `updated`, `created`, and `deleted` fields. The request
+will finish when their sum is equal to the `total` field.
+
+
+[float]
+=== Examples
+
+[[picking-up-a-new-property]]
+==== Pick up a new property
+
+Say you created an index without dynamic mapping, filled it with data, and then
+added a mapping value to pick up more fields from the data:
+
+[source,js]
+--------------------------------------------------
+PUT test
+{
+ "mappings": {
+ "test": {
+ "dynamic": false, <1>
+ "properties": {
+ "text": {"type": "string"}
+ }
+ }
+ }
+}
+
+POST test/test?refresh
+{
+ "text": "words words",
+ "flag": "bar"
+}
+POST test/test?refresh
+{
+ "text": "words words",
+ "flag": "foo"
+}
+PUT test/_mapping/test <2>
+{
+ "properties": {
+ "text": {"type": "string"},
+ "flag": {"type": "string", "analyzer": "keyword"}
+ }
+}
+--------------------------------------------------
+// AUTOSENSE
+
+<1> This means that new fields won't be indexed, just stored in `_source`.
+
+<2> This updates the mapping to add the new `flag` field. To pick up the new
+field you have to reindex all documents with it.
+
+Searching for the data won't find anything:
+
+[source,js]
+--------------------------------------------------
+POST test/_search?filter_path=hits.total
+{
+ "query": {
+ "match": {
+ "flag": "foo"
+ }
+ }
+}
+--------------------------------------------------
+// AUTOSENSE
+
+[source,js]
+--------------------------------------------------
+{
+ "hits" : {
+ "total" : 0
+ }
+}
+--------------------------------------------------
+
+But you can issue an `_update_by_query` request to pick up the new mapping:
+
+[source,js]
+--------------------------------------------------
+POST test/_update_by_query?refresh&conflicts=proceed
+POST test/_search?filter_path=hits.total
+{
+ "query": {
+ "match": {
+ "flag": "foo"
+ }
+ }
+}
+--------------------------------------------------
+// AUTOSENSE
+
+[source,js]
+--------------------------------------------------
+{
+ "hits" : {
+ "total" : 1
+ }
+}
+--------------------------------------------------
+
+Hurray! You can do the exact same thing when adding a field to a multifield.
diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/IndicesRequestTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/IndicesRequestTests.java
index d2e05869b94..709308994f9 100644
--- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/IndicesRequestTests.java
+++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/IndicesRequestTests.java
@@ -92,7 +92,7 @@ import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.groovy.GroovyPlugin;
-import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
@@ -580,8 +580,8 @@ public class IndicesRequestTests extends ESIntegTestCase {
}
public void testSearchQueryThenFetch() throws Exception {
- interceptTransportActions(SearchServiceTransportAction.QUERY_ACTION_NAME,
- SearchServiceTransportAction.FETCH_ID_ACTION_NAME, SearchServiceTransportAction.FREE_CONTEXT_ACTION_NAME);
+ interceptTransportActions(SearchTransportService.QUERY_ACTION_NAME,
+ SearchTransportService.FETCH_ID_ACTION_NAME, SearchTransportService.FREE_CONTEXT_ACTION_NAME);
String[] randomIndicesOrAliases = randomIndicesOrAliases();
for (int i = 0; i < randomIndicesOrAliases.length; i++) {
@@ -595,14 +595,14 @@ public class IndicesRequestTests extends ESIntegTestCase {
assertThat(searchResponse.getHits().totalHits(), greaterThan(0L));
clearInterceptedActions();
- assertSameIndices(searchRequest, SearchServiceTransportAction.QUERY_ACTION_NAME, SearchServiceTransportAction.FETCH_ID_ACTION_NAME);
+ assertSameIndices(searchRequest, SearchTransportService.QUERY_ACTION_NAME, SearchTransportService.FETCH_ID_ACTION_NAME);
//free context messages are not necessarily sent, but if they are, check their indices
- assertSameIndicesOptionalRequests(searchRequest, SearchServiceTransportAction.FREE_CONTEXT_ACTION_NAME);
+ assertSameIndicesOptionalRequests(searchRequest, SearchTransportService.FREE_CONTEXT_ACTION_NAME);
}
public void testSearchDfsQueryThenFetch() throws Exception {
- interceptTransportActions(SearchServiceTransportAction.DFS_ACTION_NAME, SearchServiceTransportAction.QUERY_ID_ACTION_NAME,
- SearchServiceTransportAction.FETCH_ID_ACTION_NAME, SearchServiceTransportAction.FREE_CONTEXT_ACTION_NAME);
+ interceptTransportActions(SearchTransportService.DFS_ACTION_NAME, SearchTransportService.QUERY_ID_ACTION_NAME,
+ SearchTransportService.FETCH_ID_ACTION_NAME, SearchTransportService.FREE_CONTEXT_ACTION_NAME);
String[] randomIndicesOrAliases = randomIndicesOrAliases();
for (int i = 0; i < randomIndicesOrAliases.length; i++) {
@@ -616,15 +616,15 @@ public class IndicesRequestTests extends ESIntegTestCase {
assertThat(searchResponse.getHits().totalHits(), greaterThan(0L));
clearInterceptedActions();
- assertSameIndices(searchRequest, SearchServiceTransportAction.DFS_ACTION_NAME, SearchServiceTransportAction.QUERY_ID_ACTION_NAME,
- SearchServiceTransportAction.FETCH_ID_ACTION_NAME);
+ assertSameIndices(searchRequest, SearchTransportService.DFS_ACTION_NAME, SearchTransportService.QUERY_ID_ACTION_NAME,
+ SearchTransportService.FETCH_ID_ACTION_NAME);
//free context messages are not necessarily sent, but if they are, check their indices
- assertSameIndicesOptionalRequests(searchRequest, SearchServiceTransportAction.FREE_CONTEXT_ACTION_NAME);
+ assertSameIndicesOptionalRequests(searchRequest, SearchTransportService.FREE_CONTEXT_ACTION_NAME);
}
public void testSearchQueryAndFetch() throws Exception {
- interceptTransportActions(SearchServiceTransportAction.QUERY_FETCH_ACTION_NAME,
- SearchServiceTransportAction.FREE_CONTEXT_ACTION_NAME);
+ interceptTransportActions(SearchTransportService.QUERY_FETCH_ACTION_NAME,
+ SearchTransportService.FREE_CONTEXT_ACTION_NAME);
String[] randomIndicesOrAliases = randomIndicesOrAliases();
for (int i = 0; i < randomIndicesOrAliases.length; i++) {
@@ -638,14 +638,14 @@ public class IndicesRequestTests extends ESIntegTestCase {
assertThat(searchResponse.getHits().totalHits(), greaterThan(0L));
clearInterceptedActions();
- assertSameIndices(searchRequest, SearchServiceTransportAction.QUERY_FETCH_ACTION_NAME);
+ assertSameIndices(searchRequest, SearchTransportService.QUERY_FETCH_ACTION_NAME);
//free context messages are not necessarily sent, but if they are, check their indices
- assertSameIndicesOptionalRequests(searchRequest, SearchServiceTransportAction.FREE_CONTEXT_ACTION_NAME);
+ assertSameIndicesOptionalRequests(searchRequest, SearchTransportService.FREE_CONTEXT_ACTION_NAME);
}
public void testSearchDfsQueryAndFetch() throws Exception {
- interceptTransportActions(SearchServiceTransportAction.QUERY_QUERY_FETCH_ACTION_NAME,
- SearchServiceTransportAction.FREE_CONTEXT_ACTION_NAME);
+ interceptTransportActions(SearchTransportService.QUERY_QUERY_FETCH_ACTION_NAME,
+ SearchTransportService.FREE_CONTEXT_ACTION_NAME);
String[] randomIndicesOrAliases = randomIndicesOrAliases();
for (int i = 0; i < randomIndicesOrAliases.length; i++) {
@@ -659,9 +659,9 @@ public class IndicesRequestTests extends ESIntegTestCase {
assertThat(searchResponse.getHits().totalHits(), greaterThan(0L));
clearInterceptedActions();
- assertSameIndices(searchRequest, SearchServiceTransportAction.QUERY_QUERY_FETCH_ACTION_NAME);
+ assertSameIndices(searchRequest, SearchTransportService.QUERY_QUERY_FETCH_ACTION_NAME);
//free context messages are not necessarily sent, but if they are, check their indices
- assertSameIndicesOptionalRequests(searchRequest, SearchServiceTransportAction.FREE_CONTEXT_ACTION_NAME);
+ assertSameIndicesOptionalRequests(searchRequest, SearchTransportService.FREE_CONTEXT_ACTION_NAME);
}
private static void assertSameIndices(IndicesRequest originalRequest, String... actions) {
diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle
new file mode 100644
index 00000000000..5bd5aeb3dc0
--- /dev/null
+++ b/modules/reindex/build.gradle
@@ -0,0 +1,23 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+esplugin {
+ description 'The Reindex module adds APIs to reindex from one index to another or update documents in place.'
+ classname 'org.elasticsearch.index.reindex.ReindexPlugin'
+}
diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java
new file mode 100644
index 00000000000..861c03cd706
--- /dev/null
+++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java
@@ -0,0 +1,411 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.reindex;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.bulk.BackoffPolicy;
+import org.elasticsearch.action.bulk.BulkItemResponse;
+import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.bulk.Retry;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.search.ClearScrollRequest;
+import org.elasticsearch.action.search.ClearScrollResponse;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchScrollRequest;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.AbstractRunnable;
+import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static java.lang.Math.max;
+import static java.lang.Math.min;
+import static java.util.Collections.emptyList;
+import static java.util.Collections.unmodifiableList;
+import static org.elasticsearch.action.bulk.BackoffPolicy.exponentialBackoff;
+import static org.elasticsearch.common.unit.TimeValue.timeValueNanos;
+import static org.elasticsearch.index.reindex.AbstractBulkByScrollRequest.SIZE_ALL_MATCHES;
+import static org.elasticsearch.rest.RestStatus.CONFLICT;
+import static org.elasticsearch.search.sort.SortBuilders.fieldSort;
+
+/**
+ * Abstract base for scrolling across a search and executing bulk actions on all
+ * results.
+ */
+public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBulkByScrollRequest<Request>, Response> {
+ /**
+ * The request for this action. Named mainRequest because we create lots of request
+ * variables all representing child requests of this mainRequest.
+ */
+ protected final Request mainRequest;
+ protected final BulkByScrollTask task;
+
+ private final AtomicLong startTime = new AtomicLong(-1);
+ private final AtomicReference<String> scroll = new AtomicReference<>();
+ private final Set<String> destinationIndices = Collections.newSetFromMap(new ConcurrentHashMap<>());
+
+ private final ESLogger logger;
+ private final Client client;
+ private final ThreadPool threadPool;
+ private final SearchRequest firstSearchRequest;
+ private final ActionListener<Response> listener;
+ private final Retry retry;
+
+ public AbstractAsyncBulkByScrollAction(BulkByScrollTask task, ESLogger logger, Client client, ThreadPool threadPool,
+ Request mainRequest, SearchRequest firstSearchRequest, ActionListener<Response> listener) {
+ this.task = task;
+ this.logger = logger;
+ this.client = client;
+ this.threadPool = threadPool;
+ this.mainRequest = mainRequest;
+ this.firstSearchRequest = firstSearchRequest;
+ this.listener = listener;
+ retry = Retry.on(EsRejectedExecutionException.class).policy(wrapBackoffPolicy(backoffPolicy()));
+ }
+
+ protected abstract BulkRequest buildBulk(Iterable<SearchHit> docs);
+
+ protected abstract Response buildResponse(TimeValue took, List<Failure> indexingFailures, List<ShardSearchFailure> searchFailures);
+
+ public void start() {
+ initialSearch();
+ }
+
+ public BulkByScrollTask getTask() {
+ return task;
+ }
+
+ void initialSearch() {
+ if (task.isCancelled()) {
+ finishHim(null);
+ return;
+ }
+ try {
+ // Default to sorting by _doc if it hasn't been changed.
+ if (firstSearchRequest.source().sorts() == null) {
+ firstSearchRequest.source().sort(fieldSort("_doc"));
+ }
+ startTime.set(System.nanoTime());
+ if (logger.isDebugEnabled()) {
+ logger.debug("executing initial scroll against {}{}",
+ firstSearchRequest.indices() == null || firstSearchRequest.indices().length == 0 ? "all indices"
+ : firstSearchRequest.indices(),
+ firstSearchRequest.types() == null || firstSearchRequest.types().length == 0 ? ""
+ : firstSearchRequest.types());
+ }
+ client.search(firstSearchRequest, new ActionListener<SearchResponse>() {
+ @Override
+ public void onResponse(SearchResponse response) {
+ logger.debug("[{}] documents match query", response.getHits().getTotalHits());
+ onScrollResponse(response);
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ finishHim(e);
+ }
+ });
+ } catch (Throwable t) {
+ finishHim(t);
+ }
+ }
+
+ /**
+ * Set the last returned scrollId. Package private for testing.
+ */
+ void setScroll(String scroll) {
+ this.scroll.set(scroll);
+ }
+
+ void onScrollResponse(SearchResponse searchResponse) {
+ if (task.isCancelled()) {
+ finishHim(null);
+ return;
+ }
+ setScroll(searchResponse.getScrollId());
+ if (searchResponse.getShardFailures() != null && searchResponse.getShardFailures().length > 0) {
+ startNormalTermination(emptyList(), unmodifiableList(Arrays.asList(searchResponse.getShardFailures())));
+ return;
+ }
+ long total = searchResponse.getHits().totalHits();
+ if (mainRequest.getSize() > 0) {
+ total = min(total, mainRequest.getSize());
+ }
+ task.setTotal(total);
+ threadPool.generic().execute(new AbstractRunnable() {
+ @Override
+ protected void doRun() throws Exception {
+ SearchHit[] docs = searchResponse.getHits().getHits();
+ logger.debug("scroll returned [{}] documents with a scroll id of [{}]", docs.length, searchResponse.getScrollId());
+ if (docs.length == 0) {
+ startNormalTermination(emptyList(), emptyList());
+ return;
+ }
+ task.countBatch();
+ List<SearchHit> docsIterable = Arrays.asList(docs);
+ if (mainRequest.getSize() != SIZE_ALL_MATCHES) {
+ // Truncate the docs if we have more than the request size
+ long remaining = max(0, mainRequest.getSize() - task.getSuccessfullyProcessed());
+ if (remaining < docs.length) {
+ docsIterable = docsIterable.subList(0, (int) remaining);
+ }
+ }
+ BulkRequest request = buildBulk(docsIterable);
+ if (request.requests().isEmpty()) {
+ /*
+ * If we noop-ed the entire batch then just skip to the next batch or the BulkRequest would fail validation.
+ */
+ startNextScroll();
+ return;
+ }
+ request.timeout(mainRequest.getTimeout());
+ request.consistencyLevel(mainRequest.getConsistency());
+ if (logger.isDebugEnabled()) {
+ logger.debug("sending [{}] entry, [{}] bulk request", request.requests().size(),
+ new ByteSizeValue(request.estimatedSizeInBytes()));
+ }
+ sendBulkRequest(request);
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ finishHim(t);
+ }
+ });
+ }
+
+ void sendBulkRequest(BulkRequest request) {
+ if (task.isCancelled()) {
+ finishHim(null);
+ return;
+ }
+ retry.withAsyncBackoff(client, request, new ActionListener<BulkResponse>() {
+ @Override
+ public void onResponse(BulkResponse response) {
+ onBulkResponse(response);
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ finishHim(e);
+ }
+ });
+ }
+
+ void onBulkResponse(BulkResponse response) {
+ if (task.isCancelled()) {
+ finishHim(null);
+ return;
+ }
+ try {
+ List<Failure> failures = new ArrayList<>();
+ Set<String> destinationIndicesThisBatch = new HashSet<>();
+ for (BulkItemResponse item : response) {
+ if (item.isFailed()) {
+ recordFailure(item.getFailure(), failures);
+ continue;
+ }
+
+ switch (item.getOpType()) {
+ case "index":
+ case "create":
+ IndexResponse ir = item.getResponse();
+ if (ir.isCreated()) {
+ task.countCreated();
+ } else {
+ task.countUpdated();
+ }
+ break;
+ case "delete":
+ task.countDeleted();
+ break;
+ default:
+ throw new IllegalArgumentException("Unknown op type: " + item.getOpType());
+ }
+ // Track the indexes we've seen so we can refresh them if requested
+ destinationIndicesThisBatch.add(item.getIndex());
+ }
+ destinationIndices.addAll(destinationIndicesThisBatch);
+
+ if (false == failures.isEmpty()) {
+ startNormalTermination(unmodifiableList(failures), emptyList());
+ return;
+ }
+
+ if (mainRequest.getSize() != SIZE_ALL_MATCHES && task.getSuccessfullyProcessed() >= mainRequest.getSize()) {
+ // We've processed all the requested docs.
+ startNormalTermination(emptyList(), emptyList());
+ return;
+ }
+ startNextScroll();
+ } catch (Throwable t) {
+ finishHim(t);
+ }
+ }
+
+ void startNextScroll() {
+ if (task.isCancelled()) {
+ finishHim(null);
+ return;
+ }
+ SearchScrollRequest request = new SearchScrollRequest();
+ request.scrollId(scroll.get()).scroll(firstSearchRequest.scroll());
+ client.searchScroll(request, new ActionListener<SearchResponse>() {
+ @Override
+ public void onResponse(SearchResponse response) {
+ onScrollResponse(response);
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ finishHim(e);
+ }
+ });
+ }
+
+ private void recordFailure(Failure failure, List<Failure> failures) {
+ if (failure.getStatus() == CONFLICT) {
+ task.countVersionConflict();
+ if (false == mainRequest.isAbortOnVersionConflict()) {
+ return;
+ }
+ }
+ failures.add(failure);
+ }
+
+ void startNormalTermination(List<Failure> indexingFailures, List<ShardSearchFailure> searchFailures) {
+ if (false == mainRequest.isRefresh()) {
+ finishHim(null, indexingFailures, searchFailures);
+ return;
+ }
+ RefreshRequest refresh = new RefreshRequest();
+ refresh.indices(destinationIndices.toArray(new String[destinationIndices.size()]));
+ client.admin().indices().refresh(refresh, new ActionListener<RefreshResponse>() {
+ @Override
+ public void onResponse(RefreshResponse response) {
+ finishHim(null, indexingFailures, searchFailures);
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ finishHim(e);
+ }
+ });
+ }
+
+ /**
+ * Finish the request.
+ *
+ * @param failure if non null then the request failed catastrophically with this exception
+ */
+ void finishHim(Throwable failure) {
+ finishHim(failure, emptyList(), emptyList());
+ }
+
+ /**
+ * Finish the request.
+ *
+ * @param failure if non null then the request failed catastrophically with this exception
+ * @param indexingFailures any indexing failures accumulated during the request
+ * @param searchFailures any search failures accumulated during the request
+ */
+ void finishHim(Throwable failure, List<Failure> indexingFailures, List<ShardSearchFailure> searchFailures) {
+ String scrollId = scroll.get();
+ if (Strings.hasLength(scrollId)) {
+ /*
+ * Fire off the clear scroll but don't wait for it to return before
+ * we send the user their response.
+ */
+ ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
+ clearScrollRequest.addScrollId(scrollId);
+ client.clearScroll(clearScrollRequest, new ActionListener<ClearScrollResponse>() {
+ @Override
+ public void onResponse(ClearScrollResponse response) {
+ logger.debug("Freed [{}] contexts", response.getNumFreed());
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ logger.warn("Failed to clear scroll [" + scrollId + ']', e);
+ }
+ });
+ }
+ if (failure == null) {
+ listener.onResponse(buildResponse(timeValueNanos(System.nanoTime() - startTime.get()), indexingFailures, searchFailures));
+ } else {
+ listener.onFailure(failure);
+ }
+ }
+
+ /**
+ * Build the backoff policy for use with retries. Package private for testing.
+ */
+ BackoffPolicy backoffPolicy() {
+ return exponentialBackoff(mainRequest.getRetryBackoffInitialTime(), mainRequest.getMaxRetries());
+ }
+
+ /**
+ * Wraps a backoffPolicy in another policy that counts the number of backoffs acquired.
+ */
+ private BackoffPolicy wrapBackoffPolicy(BackoffPolicy backoffPolicy) {
+ return new BackoffPolicy() {
+ @Override
+ public Iterator<TimeValue> iterator() {
+ return new Iterator<TimeValue>() {
+ private final Iterator<TimeValue> delegate = backoffPolicy.iterator();
+ @Override
+ public boolean hasNext() {
+ return delegate.hasNext();
+ }
+
+ @Override
+ public TimeValue next() {
+ if (false == delegate.hasNext()) {
+ return null;
+ }
+ task.countRetry();
+ return delegate.next();
+ }
+ };
+ }
+ };
+ }
+}
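
The wrapBackoffPolicy decoration above is easier to see in isolation. Below is a minimal standalone sketch of the same idea, with plain long millisecond delays standing in for TimeValue and an AtomicLong standing in for the task's retry counter (both substitutions are assumptions for the sketch, not the classes used above); note the real wrapper above also returns null once its delegate is exhausted instead of throwing.

```java
import java.util.Arrays;
import java.util.Iterator;
import java.util.concurrent.atomic.AtomicLong;

// Minimal sketch of the backoff-counting decorator: every delay handed out
// by the wrapped policy increments a shared retry counter.
public class CountingBackoffDemo {
    static Iterable<Long> countRetries(Iterable<Long> delegate, AtomicLong retries) {
        return () -> {
            Iterator<Long> it = delegate.iterator();
            return new Iterator<Long>() {
                @Override
                public boolean hasNext() {
                    return it.hasNext();
                }

                @Override
                public Long next() {
                    retries.incrementAndGet(); // every delay handed out is one retry
                    return it.next();
                }
            };
        };
    }

    public static void main(String[] args) {
        AtomicLong retries = new AtomicLong();
        for (long delay : countRetries(Arrays.asList(500L, 1000L, 2000L), retries)) {
            System.out.println("backing off " + delay + "ms");
        }
        System.out.println("retries counted: " + retries.get()); // 3
    }
}
```
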
diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java
new file mode 100644
index 00000000000..3f39f824009
--- /dev/null
+++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java
@@ -0,0 +1,238 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.reindex;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.index.mapper.internal.IdFieldMapper;
+import org.elasticsearch.index.mapper.internal.IndexFieldMapper;
+import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
+import org.elasticsearch.index.mapper.internal.RoutingFieldMapper;
+import org.elasticsearch.index.mapper.internal.SourceFieldMapper;
+import org.elasticsearch.index.mapper.internal.TTLFieldMapper;
+import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
+import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
+import org.elasticsearch.index.mapper.internal.VersionFieldMapper;
+import org.elasticsearch.script.CompiledScript;
+import org.elasticsearch.script.ExecutableScript;
+import org.elasticsearch.script.ScriptContext;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHitField;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+
+import static java.util.Collections.emptyMap;
+
+/**
+ * Abstract base for scrolling across a search and executing bulk indexes on all
+ * results.
+ */
+public abstract class AbstractAsyncBulkIndexByScrollAction<
+ Request extends AbstractBulkIndexByScrollRequest<Request>,
+ Response extends BulkIndexByScrollResponse>
+ extends AbstractAsyncBulkByScrollAction<Request, Response> {
+
+ private final ScriptService scriptService;
+ private final CompiledScript script;
+
+ public AbstractAsyncBulkIndexByScrollAction(BulkByScrollTask task, ESLogger logger, ScriptService scriptService,
+ Client client, ThreadPool threadPool, Request mainRequest, SearchRequest firstSearchRequest,
+ ActionListener<Response> listener) {
+ super(task, logger, client, threadPool, mainRequest, firstSearchRequest, listener);
+ this.scriptService = scriptService;
+ if (mainRequest.getScript() == null) {
+ script = null;
+ } else {
+ script = scriptService.compile(mainRequest.getScript(), ScriptContext.Standard.UPDATE, emptyMap());
+ }
+ }
+
+ /**
+ * Build the IndexRequest for a single search hit. This shouldn't handle
+ * metadata or the script. That will be handled by copyMetadata and
+ * applyScript functions that can be overridden.
+ */
+ protected abstract IndexRequest buildIndexRequest(SearchHit doc);
+
+ @Override
+ protected BulkRequest buildBulk(Iterable<SearchHit> docs) {
+ BulkRequest bulkRequest = new BulkRequest();
+ ExecutableScript executableScript = null;
+ Map<String, Object> scriptCtx = null;
+
+ for (SearchHit doc : docs) {
+ IndexRequest index = buildIndexRequest(doc);
+ copyMetadata(index, doc);
+ if (script != null) {
+ if (executableScript == null) {
+ executableScript = scriptService.executable(script, mainRequest.getScript().getParams());
+ scriptCtx = new HashMap<>();
+ }
+ if (false == applyScript(index, doc, executableScript, scriptCtx)) {
+ continue;
+ }
+ }
+ bulkRequest.add(index);
+ }
+
+ return bulkRequest;
+ }
+
+ /**
+ * Copies the metadata from a hit to the index request.
+ */
+ protected void copyMetadata(IndexRequest index, SearchHit doc) {
+ index.parent(fieldValue(doc, ParentFieldMapper.NAME));
+ copyRouting(index, doc);
+ // Comes back as a Long but needs to be a string
+ Long timestamp = fieldValue(doc, TimestampFieldMapper.NAME);
+ if (timestamp != null) {
+ index.timestamp(timestamp.toString());
+ }
+ Long ttl = fieldValue(doc, TTLFieldMapper.NAME);
+ if (ttl != null) {
+ index.ttl(ttl);
+ }
+ }
+
+ /**
+ * Part of copyMetadata but called out individually so it is easy to override.
+ */
+ protected void copyRouting(IndexRequest index, SearchHit doc) {
+ index.routing(fieldValue(doc, RoutingFieldMapper.NAME));
+ }
+
+ protected <T> T fieldValue(SearchHit doc, String fieldName) {
+ SearchHitField field = doc.field(fieldName);
+ return field == null ? null : field.value();
+ }
+
+ /**
+ * Apply a script to the request.
+ *
+ * @return is this request still ok to apply (true) or is it a noop (false)
+ */
+ @SuppressWarnings("unchecked")
+ protected boolean applyScript(IndexRequest index, SearchHit doc, ExecutableScript script, final Map<String, Object> ctx) {
+ if (script == null) {
+ return true;
+ }
+ ctx.put(IndexFieldMapper.NAME, doc.index());
+ ctx.put(TypeFieldMapper.NAME, doc.type());
+ ctx.put(IdFieldMapper.NAME, doc.id());
+ Long oldVersion = doc.getVersion();
+ ctx.put(VersionFieldMapper.NAME, oldVersion);
+ String oldParent = fieldValue(doc, ParentFieldMapper.NAME);
+ ctx.put(ParentFieldMapper.NAME, oldParent);
+ String oldRouting = fieldValue(doc, RoutingFieldMapper.NAME);
+ ctx.put(RoutingFieldMapper.NAME, oldRouting);
+ Long oldTimestamp = fieldValue(doc, TimestampFieldMapper.NAME);
+ ctx.put(TimestampFieldMapper.NAME, oldTimestamp);
+ Long oldTTL = fieldValue(doc, TTLFieldMapper.NAME);
+ ctx.put(TTLFieldMapper.NAME, oldTTL);
+ ctx.put(SourceFieldMapper.NAME, index.sourceAsMap());
+ ctx.put("op", "update");
+ script.setNextVar("ctx", ctx);
+ script.run();
+ Map<String, Object> resultCtx = (Map<String, Object>) script.unwrap(ctx);
+ String newOp = (String) resultCtx.remove("op");
+ if (newOp == null) {
+ throw new IllegalArgumentException("Script cleared op!");
+ }
+ if ("noop".equals(newOp)) {
+ task.countNoop();
+ return false;
+ }
+ if (false == "update".equals(newOp)) {
+ throw new IllegalArgumentException("Invalid op [" + newOp + ']');
+ }
+
+ /*
+ * It'd be lovely to only set the source if we know it's been modified
+ * but it isn't worth keeping two copies of it around just to check!
+ */
+ index.source((Map<String, Object>) resultCtx.remove(SourceFieldMapper.NAME));
+
+ Object newValue = ctx.remove(IndexFieldMapper.NAME);
+ if (false == doc.index().equals(newValue)) {
+ scriptChangedIndex(index, newValue);
+ }
+ newValue = ctx.remove(TypeFieldMapper.NAME);
+ if (false == doc.type().equals(newValue)) {
+ scriptChangedType(index, newValue);
+ }
+ newValue = ctx.remove(IdFieldMapper.NAME);
+ if (false == doc.id().equals(newValue)) {
+ scriptChangedId(index, newValue);
+ }
+ newValue = ctx.remove(VersionFieldMapper.NAME);
+ if (false == Objects.equals(oldVersion, newValue)) {
+ scriptChangedVersion(index, newValue);
+ }
+ newValue = ctx.remove(ParentFieldMapper.NAME);
+ if (false == Objects.equals(oldParent, newValue)) {
+ scriptChangedParent(index, newValue);
+ }
+ /*
+ * It's important that routing comes after parent in case you want to
+ * change them both.
+ */
+ newValue = ctx.remove(RoutingFieldMapper.NAME);
+ if (false == Objects.equals(oldRouting, newValue)) {
+ scriptChangedRouting(index, newValue);
+ }
+ newValue = ctx.remove(TimestampFieldMapper.NAME);
+ if (false == Objects.equals(oldTimestamp, newValue)) {
+ scriptChangedTimestamp(index, newValue);
+ }
+ newValue = ctx.remove(TTLFieldMapper.NAME);
+ if (false == Objects.equals(oldTTL, newValue)) {
+ scriptChangedTTL(index, newValue);
+ }
+ if (false == ctx.isEmpty()) {
+ throw new IllegalArgumentException("Invalid fields added to ctx [" + String.join(",", ctx.keySet()) + ']');
+ }
+ return true;
+ }
+
+ protected abstract void scriptChangedIndex(IndexRequest index, Object to);
+
+ protected abstract void scriptChangedType(IndexRequest index, Object to);
+
+ protected abstract void scriptChangedId(IndexRequest index, Object to);
+
+ protected abstract void scriptChangedVersion(IndexRequest index, Object to);
+
+ protected abstract void scriptChangedRouting(IndexRequest index, Object to);
+
+ protected abstract void scriptChangedParent(IndexRequest index, Object to);
+
+ protected abstract void scriptChangedTimestamp(IndexRequest index, Object to);
+
+ protected abstract void scriptChangedTTL(IndexRequest index, Object to);
+}
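
The ctx contract that applyScript enforces can be reduced to a standalone sketch: metadata is exposed to the script through a map, the "op" key decides between update and noop, and any metadata key whose value changed triggers one of the scriptChanged* hooks. In the sketch below a Consumer stands in for the compiled script; the field names mirror the metadata keys above, and everything else is illustrative.

```java
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import java.util.function.Consumer;

// Standalone sketch of the ctx contract: metadata goes into a map, the
// script mutates it, "op" controls noop-vs-update, and changed metadata
// fields are detected by comparing against a snapshot taken beforehand.
public class CtxContractDemo {
    public static void main(String[] args) {
        Map<String, Object> ctx = new HashMap<>();
        ctx.put("_index", "source");
        ctx.put("_id", "1");
        ctx.put("op", "update");

        // Stand-in for an executable script that retargets the document.
        Consumer<Map<String, Object>> script = c -> c.put("_index", "dest");

        Map<String, Object> before = new HashMap<>(ctx);
        script.accept(ctx);

        String op = (String) ctx.remove("op");
        if ("noop".equals(op)) {
            return; // the real action counts this and skips the doc
        }
        if (!"update".equals(op)) {
            throw new IllegalArgumentException("Invalid op [" + op + ']');
        }
        for (Map.Entry<String, Object> old : before.entrySet()) {
            if (!"op".equals(old.getKey()) && !Objects.equals(old.getValue(), ctx.get(old.getKey()))) {
                System.out.println(old.getKey() + " changed to " + ctx.get(old.getKey())); // _index changed to dest
            }
        }
    }
}
```
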
diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java
new file mode 100644
index 00000000000..6f50b216c9b
--- /dev/null
+++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.reindex;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.TransportAction;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.indices.query.IndicesQueriesRegistry;
+import org.elasticsearch.rest.BaseRestHandler;
+import org.elasticsearch.rest.BytesRestResponse;
+import org.elasticsearch.rest.RestChannel;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.aggregations.AggregatorParsers;
+import org.elasticsearch.tasks.LoggingTaskListener;
+import org.elasticsearch.tasks.Task;
+
+import java.io.IOException;
+
+public abstract class AbstractBaseReindexRestHandler<Request extends ActionRequest<Request>, Response extends BulkIndexByScrollResponse,
+ TA extends TransportAction<Request, Response>> extends BaseRestHandler {
+ protected final IndicesQueriesRegistry indicesQueriesRegistry;
+ protected final AggregatorParsers aggParsers;
+ private final ClusterService clusterService;
+ private final TA action;
+
+ protected AbstractBaseReindexRestHandler(Settings settings, Client client,
+ IndicesQueriesRegistry indicesQueriesRegistry, AggregatorParsers aggParsers, ClusterService clusterService, TA action) {
+ super(settings, client);
+ this.indicesQueriesRegistry = indicesQueriesRegistry;
+ this.aggParsers = aggParsers;
+ this.clusterService = clusterService;
+ this.action = action;
+ }
+
+ protected void execute(RestRequest request, Request internalRequest, RestChannel channel) throws IOException {
+ if (request.paramAsBoolean("wait_for_completion", true)) {
+ action.execute(internalRequest, new BulkIndexByScrollResponseContentListener<Response>(channel));
+ return;
+ }
+ /*
+ * Let's try to validate before forking so the user gets an error early. The
+ * task can't fully validate until it starts, but this is better than
+ * nothing.
+ */
+ ActionRequestValidationException validationException = internalRequest.validate();
+ if (validationException != null) {
+ channel.sendResponse(new BytesRestResponse(channel, validationException));
+ return;
+ }
+ Task task = action.execute(internalRequest, LoggingTaskListener.instance());
+ sendTask(channel, task);
+ }
+
+ private void sendTask(RestChannel channel, Task task) throws IOException {
+ XContentBuilder builder = channel.newBuilder();
+ builder.startObject();
+ builder.field("task", clusterService.localNode().getId() + ":" + task.getId());
+ builder.endObject();
+ channel.sendResponse(new BytesRestResponse(RestStatus.OK, builder));
+ }
+}
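
When wait_for_completion is false, the handler above responds with just a task id in the form nodeId:taskId. A small standalone sketch of building and splitting that string, using made-up node and task ids:

```java
// Standalone sketch of the "<nodeId>:<taskId>" id that sendTask returns;
// the node id and task id values here are hypothetical.
public class TaskIdFormatDemo {
    public static void main(String[] args) {
        String nodeId = "oTUltX4IQMOUUVeiohTt8A";
        long taskId = 12345;
        String wire = nodeId + ":" + taskId; // what the REST response carries

        int colon = wire.lastIndexOf(':');
        String node = wire.substring(0, colon);
        long id = Long.parseLong(wire.substring(colon + 1));
        System.out.println("node=" + node + " task=" + id);
    }
}
```
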
diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java
new file mode 100644
index 00000000000..41b436e6074
--- /dev/null
+++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java
@@ -0,0 +1,301 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.reindex;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.WriteConsistencyLevel;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.support.replication.ReplicationRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
+import org.elasticsearch.tasks.Task;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
+import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes;
+
+public abstract class AbstractBulkByScrollRequest<Self extends AbstractBulkByScrollRequest<Self>>
+ extends ActionRequest<Self> {
+ public static final int SIZE_ALL_MATCHES = -1;
+ private static final TimeValue DEFAULT_SCROLL_TIMEOUT = timeValueMinutes(5);
+ private static final int DEFAULT_SCROLL_SIZE = 100;
+
+ /**
+ * The search to be executed.
+ */
+ private SearchRequest searchRequest;
+
+ /**
+ * Maximum number of processed documents. Defaults to -1 meaning process all
+ * documents.
+ */
+ private int size = SIZE_ALL_MATCHES;
+
+ /**
+ * Should version conflicts cause aborts? Defaults to true.
+ */
+ private boolean abortOnVersionConflict = true;
+
+ /**
+ * Call refresh on the indexes we've written to after the request ends?
+ */
+ private boolean refresh = false;
+
+ /**
+ * Timeout to wait for the shards to become available for each bulk request.
+ */
+ private TimeValue timeout = ReplicationRequest.DEFAULT_TIMEOUT;
+
+ /**
+ * Consistency level for write requests.
+ */
+ private WriteConsistencyLevel consistency = WriteConsistencyLevel.DEFAULT;
+
+ /**
+ * Initial delay after a rejection before retrying a bulk request. With the default maxRetries the total backoff for retrying rejections
+ * is about one minute per bulk request. Once the entire bulk request is successful the retry counter resets.
+ */
+ private TimeValue retryBackoffInitialTime = timeValueMillis(500);
+
+ /**
+ * Total number of retries attempted for rejections. There is no way to ask for unlimited retries.
+ */
+ private int maxRetries = 11;
+
+ public AbstractBulkByScrollRequest() {
+ }
+
+ public AbstractBulkByScrollRequest(SearchRequest source) {
+ this.searchRequest = source;
+
+ // Set the defaults which differ from SearchRequest's defaults.
+ source.scroll(DEFAULT_SCROLL_TIMEOUT);
+ source.source(new SearchSourceBuilder());
+ source.source().version(true);
+ source.source().size(DEFAULT_SCROLL_SIZE);
+ }
+
+ /**
+ * `this` cast to Self. Used for building fluent methods without cast
+ * warnings.
+ */
+ protected abstract Self self();
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException e = searchRequest.validate();
+ if (searchRequest.source().from() != -1) {
+ e = addValidationError("from is not supported in this context", e);
+ }
+ if (maxRetries < 0) {
+ e = addValidationError("retries cannnot be negative", e);
+ }
+ if (false == (size == -1 || size > 0)) {
+ e = addValidationError(
+ "size should be greater than 0 if the request is limited to some number of documents or -1 if it isn't but it was ["
+ + size + "]",
+ e);
+ }
+ return e;
+ }
+
+ /**
+ * Maximum number of processed documents. Defaults to -1 meaning process all
+ * documents.
+ */
+ public int getSize() {
+ return size;
+ }
+
+ /**
+ * Maximum number of processed documents. Defaults to -1 meaning process all
+ * documents.
+ */
+ public Self setSize(int size) {
+ this.size = size;
+ return self();
+ }
+
+ /**
+ * Should version conflicts cause aborts? Defaults to true.
+ */
+ public boolean isAbortOnVersionConflict() {
+ return abortOnVersionConflict;
+ }
+
+ /**
+ * Should version conflicts cause aborts? Defaults to true.
+ */
+ public Self setAbortOnVersionConflict(boolean abortOnVersionConflict) {
+ this.abortOnVersionConflict = abortOnVersionConflict;
+ return self();
+ }
+
+ /**
+ * Sets abortOnVersionConflict based on REST-friendly names.
+ */
+ public void setConflicts(String conflicts) {
+ switch (conflicts) {
+ case "proceed":
+ setAbortOnVersionConflict(false);
+ return;
+ case "abort":
+ setAbortOnVersionConflict(true);
+ return;
+ default:
+ throw new IllegalArgumentException("conflicts may only be \"proceed\" or \"abort\" but was [" + conflicts + "]");
+ }
+ }
+
+ /**
+ * The search request that matches the documents to process.
+ */
+ public SearchRequest getSearchRequest() {
+ return searchRequest;
+ }
+
+ /**
+ * Call refresh on the indexes we've written to after the request ends?
+ */
+ public boolean isRefresh() {
+ return refresh;
+ }
+
+ /**
+ * Call refresh on the indexes we've written to after the request ends?
+ */
+ public Self setRefresh(boolean refresh) {
+ this.refresh = refresh;
+ return self();
+ }
+
+ /**
+ * Timeout to wait for the shards to become available for each bulk request.
+ */
+ public TimeValue getTimeout() {
+ return timeout;
+ }
+
+ /**
+ * Timeout to wait for the shards to become available for each bulk request.
+ */
+ public Self setTimeout(TimeValue timeout) {
+ this.timeout = timeout;
+ return self();
+ }
+
+ /**
+ * Consistency level for write requests.
+ */
+ public WriteConsistencyLevel getConsistency() {
+ return consistency;
+ }
+
+ /**
+ * Consistency level for write requests.
+ */
+ public Self setConsistency(WriteConsistencyLevel consistency) {
+ this.consistency = consistency;
+ return self();
+ }
+
+ /**
+ * Initial delay after a rejection before retrying request.
+ */
+ public TimeValue getRetryBackoffInitialTime() {
+ return retryBackoffInitialTime;
+ }
+
+ /**
+ * Set the initial delay after a rejection before retrying request.
+ */
+ public Self setRetryBackoffInitialTime(TimeValue retryBackoffInitialTime) {
+ this.retryBackoffInitialTime = retryBackoffInitialTime;
+ return self();
+ }
+
+ /**
+ * Total number of retries attempted for rejections.
+ */
+ public int getMaxRetries() {
+ return maxRetries;
+ }
+
+ /**
+ * Set the total number of retries attempted for rejections. There is no way to ask for unlimited retries.
+ */
+ public Self setMaxRetries(int maxRetries) {
+ this.maxRetries = maxRetries;
+ return self();
+ }
+
+ @Override
+ public Task createTask(long id, String type, String action) {
+ return new BulkByScrollTask(id, type, action, getDescription());
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ searchRequest = new SearchRequest();
+ searchRequest.readFrom(in);
+ abortOnVersionConflict = in.readBoolean();
+ size = in.readVInt();
+ refresh = in.readBoolean();
+ timeout = TimeValue.readTimeValue(in);
+ consistency = WriteConsistencyLevel.fromId(in.readByte());
+ retryBackoffInitialTime = TimeValue.readTimeValue(in);
+ maxRetries = in.readVInt();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ searchRequest.writeTo(out);
+ out.writeBoolean(abortOnVersionConflict);
+ out.writeVInt(size);
+ out.writeBoolean(refresh);
+ timeout.writeTo(out);
+ out.writeByte(consistency.id());
+ retryBackoffInitialTime.writeTo(out);
+ out.writeVInt(maxRetries);
+ }
+
+ /**
+ * Append a short description of the search request to a StringBuilder. Used
+ * to make toString.
+ */
+ protected void searchToString(StringBuilder b) {
+ if (searchRequest.indices() != null && searchRequest.indices().length != 0) {
+ b.append(Arrays.toString(searchRequest.indices()));
+ } else {
+ b.append("[all indices]");
+ }
+ if (searchRequest.types() != null && searchRequest.types().length != 0) {
+ b.append(Arrays.toString(searchRequest.types()));
+ }
+ }
+}
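
The size rule enforced by validate() above is easy to state on its own: -1 (SIZE_ALL_MATCHES) means process every matching document, any positive value caps the number processed, and anything else is rejected. A standalone sketch:

```java
// Standalone sketch of the size validation rule: -1 means "all matches",
// positive values cap the number of processed documents, the rest fail.
public class SizeRuleDemo {
    static final int SIZE_ALL_MATCHES = -1;

    static boolean validSize(int size) {
        return size == SIZE_ALL_MATCHES || size > 0;
    }

    public static void main(String[] args) {
        System.out.println(validSize(-1));  // true: process everything
        System.out.println(validSize(500)); // true: stop after 500 docs
        System.out.println(validSize(0));   // false: rejected by validate()
    }
}
```
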
diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequestBuilder.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequestBuilder.java
new file mode 100644
index 00000000000..18ebe42c44d
--- /dev/null
+++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequestBuilder.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.reindex;
+
+import org.elasticsearch.action.Action;
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.WriteConsistencyLevel;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.client.ElasticsearchClient;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.query.QueryBuilder;
+
+public abstract class AbstractBulkByScrollRequestBuilder<
+ Request extends AbstractBulkByScrollRequest<Request>,
+ Response extends ActionResponse,
+ Self extends AbstractBulkByScrollRequestBuilder<Request, Response, Self>>
+ extends ActionRequestBuilder<Request, Response, Self> {
+ private final SearchRequestBuilder source;
+
+ protected AbstractBulkByScrollRequestBuilder(ElasticsearchClient client,
+ Action<Request, Response, Self> action, SearchRequestBuilder source, Request request) {
+ super(client, action, request);
+ this.source = source;
+ }
+
+ protected abstract Self self();
+
+ /**
+ * The search used to find documents to process.
+ */
+ public SearchRequestBuilder source() {
+ return source;
+ }
+
+ /**
+ * Set the source indices.
+ */
+ public Self source(String... indices) {
+ source.setIndices(indices);
+ return self();
+ }
+
+ /**
+ * Set the query that will filter the source. Just a convenience method for
+ * easy chaining.
+ */
+ public Self filter(QueryBuilder<?> filter) {
+ source.setQuery(filter);
+ return self();
+ }
+
+ /**
+ * The maximum number of documents to attempt.
+ */
+ public Self size(int size) {
+ request.setSize(size);
+ return self();
+ }
+
+ /**
+ * Should version conflicts cause the action to abort?
+ */
+ public Self abortOnVersionConflict(boolean abortOnVersionConflict) {
+ request.setAbortOnVersionConflict(abortOnVersionConflict);
+ return self();
+ }
+
+ /**
+ * Call refresh on the indexes we've written to after the request ends?
+ */
+ public Self refresh(boolean refresh) {
+ request.setRefresh(refresh);
+ return self();
+ }
+
+ /**
+ * Timeout to wait for the shards to become available for each bulk request.
+ */
+ public Self timeout(TimeValue timeout) {
+ request.setTimeout(timeout);
+ return self();
+ }
+
+ /**
+ * Consistency level for write requests.
+ */
+ public Self consistency(WriteConsistencyLevel consistency) {
+ request.setConsistency(consistency);
+ return self();
+ }
+}
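
The request and builder classes in this module all rely on the self-typed generic pattern: an abstract self() method lets fluent setters declared on the base class return the concrete subclass without casts. A minimal standalone sketch of the pattern (the class names here are invented for illustration):

```java
// Minimal sketch of the self() pattern: size() lives on the base class but
// still returns the concrete builder, so calls can be chained freely.
public class SelfTypedBuilderDemo {
    abstract static class Base<Self extends Base<Self>> {
        int size = -1;

        abstract Self self();

        Self size(int size) {
            this.size = size;
            return self();
        }
    }

    static class Concrete extends Base<Concrete> {
        String index;

        @Override
        Concrete self() {
            return this;
        }

        Concrete index(String index) {
            this.index = index;
            return this;
        }
    }

    public static void main(String[] args) {
        // size() comes from Base but the chain keeps the Concrete type:
        Concrete c = new Concrete().size(10).index("dest");
        System.out.println(c.index + " capped at " + c.size);
    }
}
```
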
diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkIndexByScrollRequest.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkIndexByScrollRequest.java
new file mode 100644
index 00000000000..c14251d5f46
--- /dev/null
+++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkIndexByScrollRequest.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.reindex;
+
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.script.Script;
+
+import java.io.IOException;
+
+public abstract class AbstractBulkIndexByScrollRequest<Self extends AbstractBulkIndexByScrollRequest<Self>>
+ extends AbstractBulkByScrollRequest<Self> {
+ /**
+ * Script to modify the documents before they are processed.
+ */
+ private Script script;
+
+ public AbstractBulkIndexByScrollRequest() {
+ }
+
+ public AbstractBulkIndexByScrollRequest(SearchRequest source) {
+ super(source);
+ }
+
+ /**
+ * Script to modify the documents before they are processed.
+ */
+ public Script getScript() {
+ return script;
+ }
+
+ /**
+ * Script to modify the documents before they are processed.
+ */
+ public Self setScript(@Nullable Script script) {
+ this.script = script;
+ return self();
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ if (in.readBoolean()) {
+ script = Script.readScript(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeOptionalStreamable(script);
+ }
+
+ @Override
+ protected void searchToString(StringBuilder b) {
+ super.searchToString(b);
+ if (script != null) {
+ b.append(" updated with [").append(script).append(']');
+ }
+ }
+}
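
The readFrom/writeTo pair above round-trips the nullable script as a presence boolean followed by the value, which is what writeOptionalStreamable emits on the wire. The same pattern with plain java.io streams, as a standalone sketch:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Standalone sketch of the optional-field wire pattern: a presence boolean
// followed by the value if present, mirrored exactly on the read side.
public class OptionalFieldDemo {
    static void writeOptional(DataOutputStream out, String value) throws IOException {
        out.writeBoolean(value != null);
        if (value != null) {
            out.writeUTF(value);
        }
    }

    static String readOptional(DataInputStream in) throws IOException {
        return in.readBoolean() ? in.readUTF() : null;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        writeOptional(out, null);           // no script set
        writeOptional(out, "ctx._source");  // a script body

        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        System.out.println(readOptional(in)); // null
        System.out.println(readOptional(in)); // ctx._source
    }
}
```
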
diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkIndexByScrollRequestBuilder.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkIndexByScrollRequestBuilder.java
new file mode 100644
index 00000000000..e5d39569236
--- /dev/null
+++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkIndexByScrollRequestBuilder.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.reindex;
+
+import org.elasticsearch.action.Action;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.client.ElasticsearchClient;
+import org.elasticsearch.script.Script;
+
+public abstract class AbstractBulkIndexByScrollRequestBuilder<
+ Request extends AbstractBulkIndexByScrollRequest<Request>,
+ Response extends ActionResponse,
+ Self extends AbstractBulkIndexByScrollRequestBuilder<Request, Response, Self>>
+ extends AbstractBulkByScrollRequestBuilder<Request, Response, Self> {
+
+ protected AbstractBulkIndexByScrollRequestBuilder(ElasticsearchClient client,
+ Action<Request, Response, Self> action, SearchRequestBuilder search, Request request) {
+ super(client, action, search, request);
+ }
+
+ /**
+ * Script to modify the documents before they are processed.
+ */
+ public Self script(Script script) {
+ request.setScript(script);
+ return self();
+ }
+}
diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java
new file mode 100644
index 00000000000..c9d8e3f188c
--- /dev/null
+++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java
@@ -0,0 +1,290 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.reindex;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.tasks.CancellableTask;
+import org.elasticsearch.tasks.Task;
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * Task storing information about a currently running BulkByScroll request.
+ */
+public class BulkByScrollTask extends CancellableTask {
+ /**
+ * The total number of documents this request will process. 0 means we don't yet know or, possibly, there are actually 0 documents
+ * to process. It's ok that these have the same meaning because any request with 0 actual documents should be quite short lived.
+ */
+ private final AtomicLong total = new AtomicLong(0);
+ private final AtomicLong updated = new AtomicLong(0);
+ private final AtomicLong created = new AtomicLong(0);
+ private final AtomicLong deleted = new AtomicLong(0);
+ private final AtomicLong noops = new AtomicLong(0);
+ private final AtomicInteger batch = new AtomicInteger(0);
+ private final AtomicLong versionConflicts = new AtomicLong(0);
+ private final AtomicLong retries = new AtomicLong(0);
+
+ public BulkByScrollTask(long id, String type, String action, String description) {
+ super(id, type, action, description);
+ }
+
+ @Override
+ public Status getStatus() {
+ return new Status(total.get(), updated.get(), created.get(), deleted.get(), batch.get(), versionConflicts.get(), noops.get(),
+ retries.get(), getReasonCancelled());
+ }
+
+ /**
+ * Total number of successfully processed documents.
+ */
+ public long getSuccessfullyProcessed() {
+ return updated.get() + created.get() + deleted.get();
+ }
+
+ public static class Status implements Task.Status {
+ public static final Status PROTOTYPE = new Status(0, 0, 0, 0, 0, 0, 0, 0, null);
+
+ private final long total;
+ private final long updated;
+ private final long created;
+ private final long deleted;
+ private final int batches;
+ private final long versionConflicts;
+ private final long noops;
+ private final long retries;
+ private final String reasonCancelled;
+
+ public Status(long total, long updated, long created, long deleted, int batches, long versionConflicts, long noops, long retries,
+ @Nullable String reasonCancelled) {
+ this.total = checkPositive(total, "total");
+ this.updated = checkPositive(updated, "updated");
+ this.created = checkPositive(created, "created");
+ this.deleted = checkPositive(deleted, "deleted");
+ this.batches = checkPositive(batches, "batches");
+ this.versionConflicts = checkPositive(versionConflicts, "versionConflicts");
+ this.noops = checkPositive(noops, "noops");
+ this.retries = checkPositive(retries, "retries");
+ this.reasonCancelled = reasonCancelled;
+ }
+
+ public Status(StreamInput in) throws IOException {
+ total = in.readVLong();
+ updated = in.readVLong();
+ created = in.readVLong();
+ deleted = in.readVLong();
+ batches = in.readVInt();
+ versionConflicts = in.readVLong();
+ noops = in.readVLong();
+ retries = in.readVLong();
+ reasonCancelled = in.readOptionalString();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(total);
+ out.writeVLong(updated);
+ out.writeVLong(created);
+ out.writeVLong(deleted);
+ out.writeVInt(batches);
+ out.writeVLong(versionConflicts);
+ out.writeVLong(noops);
+ out.writeVLong(retries);
+ out.writeOptionalString(reasonCancelled);
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ innerXContent(builder, params, true, true);
+ return builder.endObject();
+ }
+
+ public XContentBuilder innerXContent(XContentBuilder builder, Params params, boolean includeCreated, boolean includeDeleted)
+ throws IOException {
+ builder.field("total", total);
+ builder.field("updated", updated);
+ if (includeCreated) {
+ builder.field("created", created);
+ }
+ if (includeDeleted) {
+ builder.field("deleted", deleted);
+ }
+ builder.field("batches", batches);
+ builder.field("version_conflicts", versionConflicts);
+ builder.field("noops", noops);
+ builder.field("retries", retries);
+ if (reasonCancelled != null) {
+ builder.field("canceled", reasonCancelled);
+ }
+ return builder;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append("BulkIndexByScrollResponse[");
+ innerToString(builder, true, true);
+ return builder.append(']').toString();
+ }
+
+ public void innerToString(StringBuilder builder, boolean includeCreated, boolean includeDeleted) {
+ builder.append("updated=").append(updated);
+ if (includeCreated) {
+ builder.append(",created=").append(created);
+ }
+ if (includeDeleted) {
+ builder.append(",deleted=").append(deleted);
+ }
+ builder.append(",batches=").append(batches);
+ builder.append(",versionConflicts=").append(versionConflicts);
+ builder.append(",noops=").append(noops);
+ builder.append(",retries=").append(retries);
+ if (reasonCancelled != null) {
+ builder.append(",canceled=").append(reasonCancelled);
+ }
+ }
+
+ @Override
+ public String getWriteableName() {
+ return "bulk-by-scroll";
+ }
+
+ @Override
+ public Status readFrom(StreamInput in) throws IOException {
+ return new Status(in);
+ }
+
+ /**
+ * The total number of documents this request will process. 0 means we don't yet know or, possibly, there are actually 0 documents
+ * to process. It's ok that these have the same meaning because any request with 0 actual documents should be quite short lived.
+ */
+ public long getTotal() {
+ return total;
+ }
+
+ /**
+ * Count of documents updated.
+ */
+ public long getUpdated() {
+ return updated;
+ }
+
+ /**
+ * Count of documents created.
+ */
+ public long getCreated() {
+ return created;
+ }
+
+ /**
+ * Count of successful delete operations.
+ */
+ public long getDeleted() {
+ return deleted;
+ }
+
+ /**
+ * Number of scan responses this request has processed.
+ */
+ public int getBatches() {
+ return batches;
+ }
+
+ /**
+ * Number of version conflicts this request has hit.
+ */
+ public long getVersionConflicts() {
+ return versionConflicts;
+ }
+
+ /**
+ * Number of noops (skipped bulk items) as part of this request.
+ */
+ public long getNoops() {
+ return noops;
+ }
+
+ /**
+ * Number of retries that had to be attempted due to rejected executions.
+ */
+ public long getRetries() {
+ return retries;
+ }
+
+ /**
+ * The reason that the request was canceled or null if it hasn't been.
+ */
+ public String getReasonCancelled() {
+ return reasonCancelled;
+ }
+
+ private int checkPositive(int value, String name) {
+ if (value < 0) {
+ throw new IllegalArgumentException(name + " must be greater than 0 but was [" + value + "]");
+ }
+ return value;
+ }
+
+ private long checkPositive(long value, String name) {
+ if (value < 0) {
+ throw new IllegalArgumentException(name + " must be greater than 0 but was [" + value + "]");
+ }
+ return value;
+ }
+ }
+
+ void setTotal(long totalHits) {
+ total.set(totalHits);
+ }
+
+ void countBatch() {
+ batch.incrementAndGet();
+ }
+
+ void countNoop() {
+ noops.incrementAndGet();
+ }
+
+ void countCreated() {
+ created.incrementAndGet();
+ }
+
+ void countUpdated() {
+ updated.incrementAndGet();
+ }
+
+ void countDeleted() {
+ deleted.incrementAndGet();
+ }
+
+ void countVersionConflict() {
+ versionConflicts.incrementAndGet();
+ }
+
+ void countRetry() {
+ retries.incrementAndGet();
+ }
+}
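
The task above keeps its counts in mutable atomics and copies them into an immutable Status on demand, so status can be read from any thread while batches are still being processed. A reduced standalone sketch of that split:

```java
import java.util.concurrent.atomic.AtomicLong;

// Standalone sketch of the counter/snapshot split: mutable AtomicLongs on
// the task, an immutable Status copied from them on demand.
public class TaskStatusDemo {
    private final AtomicLong created = new AtomicLong();
    private final AtomicLong updated = new AtomicLong();

    void countCreated() { created.incrementAndGet(); }
    void countUpdated() { updated.incrementAndGet(); }

    Status getStatus() {
        // Each read is atomic; the snapshot is merely consistent enough,
        // which is fine for progress reporting.
        return new Status(created.get(), updated.get());
    }

    static final class Status {
        final long created;
        final long updated;

        Status(long created, long updated) {
            this.created = created;
            this.updated = updated;
        }
    }

    public static void main(String[] args) {
        TaskStatusDemo task = new TaskStatusDemo();
        task.countCreated();
        task.countUpdated();
        Status s = task.getStatus();
        System.out.println("created=" + s.created + " updated=" + s.updated);
    }
}
```
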
diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponse.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponse.java
new file mode 100644
index 00000000000..ca1a53ef999
--- /dev/null
+++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponse.java
@@ -0,0 +1,169 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.reindex;
+
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static java.lang.Math.min;
+import static java.util.Collections.unmodifiableList;
+import static java.util.Objects.requireNonNull;
+import static org.elasticsearch.action.search.ShardSearchFailure.readShardSearchFailure;
+
+/**
+ * Response used for actions that index many documents using a scroll request.
+ */
+public class BulkIndexByScrollResponse extends ActionResponse implements ToXContent {
+ private TimeValue took;
+ private BulkByScrollTask.Status status;
+ private List<Failure> indexingFailures;
+ private List<ShardSearchFailure> searchFailures;
+
+ public BulkIndexByScrollResponse() {
+ }
+
+ public BulkIndexByScrollResponse(TimeValue took, BulkByScrollTask.Status status, List<Failure> indexingFailures,
+ List<ShardSearchFailure> searchFailures) {
+ this.took = took;
+ this.status = requireNonNull(status, "Null status not supported");
+ this.indexingFailures = indexingFailures;
+ this.searchFailures = searchFailures;
+ }
+
+ public TimeValue getTook() {
+ return took;
+ }
+
+ protected BulkByScrollTask.Status getStatus() {
+ return status;
+ }
+
+ public long getUpdated() {
+ return status.getUpdated();
+ }
+
+ public int getBatches() {
+ return status.getBatches();
+ }
+
+ public long getVersionConflicts() {
+ return status.getVersionConflicts();
+ }
+
+ public long getNoops() {
+ return status.getNoops();
+ }
+
+ /**
+ * The reason that the request was canceled or null if it hasn't been.
+ */
+ public String getReasonCancelled() {
+ return status.getReasonCancelled();
+ }
+
+ /**
+ * All of the indexing failures. Version conflicts are only included if the request sets abortOnVersionConflict to true (the
+ * default).
+ */
+ public List<Failure> getIndexingFailures() {
+ return indexingFailures;
+ }
+
+ /**
+ * All search failures.
+ */
+ public List<ShardSearchFailure> getSearchFailures() {
+ return searchFailures;
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ took.writeTo(out);
+ status.writeTo(out);
+ out.writeVInt(indexingFailures.size());
+ for (Failure failure: indexingFailures) {
+ failure.writeTo(out);
+ }
+ out.writeVInt(searchFailures.size());
+ for (ShardSearchFailure failure: searchFailures) {
+ failure.writeTo(out);
+ }
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ took = TimeValue.readTimeValue(in);
+ status = new BulkByScrollTask.Status(in);
+ int indexingFailuresCount = in.readVInt();
+ List