mirror of
https://github.com/honeymoose/OpenSearch.git
synced 2025-02-08 22:14:59 +00:00
Merge branch 'master' into feature/aggs-refactoring
# Conflicts: # core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java # core/src/main/java/org/elasticsearch/percolator/PercolateContext.java # core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java # core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java # core/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java
This commit is contained in:
commit
859f9e69b7
@ -39,6 +39,8 @@ import java.util.Map;
|
|||||||
import java.util.Set;
|
import java.util.Set;
|
||||||
import java.util.stream.Collectors;
|
import java.util.stream.Collectors;
|
||||||
|
|
||||||
|
import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_UUID_NA_VALUE;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* A base class for all elasticsearch exceptions.
|
* A base class for all elasticsearch exceptions.
|
||||||
*/
|
*/
|
||||||
@ -49,6 +51,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
|
|||||||
public static final boolean REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT = true;
|
public static final boolean REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT = true;
|
||||||
public static final boolean REST_EXCEPTION_SKIP_CAUSE_DEFAULT = false;
|
public static final boolean REST_EXCEPTION_SKIP_CAUSE_DEFAULT = false;
|
||||||
private static final String INDEX_HEADER_KEY = "es.index";
|
private static final String INDEX_HEADER_KEY = "es.index";
|
||||||
|
private static final String INDEX_HEADER_KEY_UUID = "es.index_uuid";
|
||||||
private static final String SHARD_HEADER_KEY = "es.shard";
|
private static final String SHARD_HEADER_KEY = "es.shard";
|
||||||
private static final String RESOURCE_HEADER_TYPE_KEY = "es.resource.type";
|
private static final String RESOURCE_HEADER_TYPE_KEY = "es.resource.type";
|
||||||
private static final String RESOURCE_HEADER_ID_KEY = "es.resource.id";
|
private static final String RESOURCE_HEADER_ID_KEY = "es.resource.id";
|
||||||
@ -70,7 +73,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
|
|||||||
* The message can be parameterized using <code>{}</code> as placeholders for the given
|
* The message can be parameterized using <code>{}</code> as placeholders for the given
|
||||||
* arguments
|
* arguments
|
||||||
*
|
*
|
||||||
* @param msg the detail message
|
* @param msg the detail message
|
||||||
* @param args the arguments for the message
|
* @param args the arguments for the message
|
||||||
*/
|
*/
|
||||||
public ElasticsearchException(String msg, Object... args) {
|
public ElasticsearchException(String msg, Object... args) {
|
||||||
@ -332,7 +335,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
|
|||||||
|
|
||||||
private void xContentHeader(XContentBuilder builder, String key, List<String> values) throws IOException {
|
private void xContentHeader(XContentBuilder builder, String key, List<String> values) throws IOException {
|
||||||
if (values != null && values.isEmpty() == false) {
|
if (values != null && values.isEmpty() == false) {
|
||||||
if(values.size() == 1) {
|
if (values.size() == 1) {
|
||||||
builder.field(key, values.get(0));
|
builder.field(key, values.get(0));
|
||||||
} else {
|
} else {
|
||||||
builder.startArray(key);
|
builder.startArray(key);
|
||||||
@ -374,7 +377,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
|
|||||||
if (cause != null && cause instanceof ElasticsearchException) {
|
if (cause != null && cause instanceof ElasticsearchException) {
|
||||||
return ((ElasticsearchException) cause).guessRootCauses();
|
return ((ElasticsearchException) cause).guessRootCauses();
|
||||||
}
|
}
|
||||||
return new ElasticsearchException[] {this};
|
return new ElasticsearchException[]{this};
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -387,7 +390,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
|
|||||||
if (ex instanceof ElasticsearchException) {
|
if (ex instanceof ElasticsearchException) {
|
||||||
return ((ElasticsearchException) ex).guessRootCauses();
|
return ((ElasticsearchException) ex).guessRootCauses();
|
||||||
}
|
}
|
||||||
return new ElasticsearchException[] {new ElasticsearchException(t.getMessage(), t) {
|
return new ElasticsearchException[]{new ElasticsearchException(t.getMessage(), t) {
|
||||||
@Override
|
@Override
|
||||||
protected String getExceptionName() {
|
protected String getExceptionName() {
|
||||||
return getExceptionName(getCause());
|
return getExceptionName(getCause());
|
||||||
@ -414,7 +417,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
|
|||||||
public String toString() {
|
public String toString() {
|
||||||
StringBuilder builder = new StringBuilder();
|
StringBuilder builder = new StringBuilder();
|
||||||
if (headers.containsKey(INDEX_HEADER_KEY)) {
|
if (headers.containsKey(INDEX_HEADER_KEY)) {
|
||||||
builder.append('[').append(getIndex()).append(']');
|
builder.append(getIndex());
|
||||||
if (headers.containsKey(SHARD_HEADER_KEY)) {
|
if (headers.containsKey(SHARD_HEADER_KEY)) {
|
||||||
builder.append('[').append(getShardId()).append(']');
|
builder.append('[').append(getShardId()).append(']');
|
||||||
}
|
}
|
||||||
@ -435,7 +438,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
|
|||||||
final String fileName = in.readOptionalString();
|
final String fileName = in.readOptionalString();
|
||||||
final String methodName = in.readString();
|
final String methodName = in.readString();
|
||||||
final int lineNumber = in.readVInt();
|
final int lineNumber = in.readVInt();
|
||||||
stackTrace[i] = new StackTraceElement(declaringClasss,methodName, fileName, lineNumber);
|
stackTrace[i] = new StackTraceElement(declaringClasss, methodName, fileName, lineNumber);
|
||||||
}
|
}
|
||||||
throwable.setStackTrace(stackTrace);
|
throwable.setStackTrace(stackTrace);
|
||||||
|
|
||||||
@ -631,10 +634,11 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
|
|||||||
CLASS_TO_ELASTICSEARCH_EXCEPTION_HANDLE = Collections.unmodifiableMap(exceptions);
|
CLASS_TO_ELASTICSEARCH_EXCEPTION_HANDLE = Collections.unmodifiableMap(exceptions);
|
||||||
}
|
}
|
||||||
|
|
||||||
public String getIndex() {
|
public Index getIndex() {
|
||||||
List<String> index = getHeader(INDEX_HEADER_KEY);
|
List<String> index = getHeader(INDEX_HEADER_KEY);
|
||||||
if (index != null && index.isEmpty() == false) {
|
if (index != null && index.isEmpty() == false) {
|
||||||
return index.get(0);
|
List<String> index_uuid = getHeader(INDEX_HEADER_KEY_UUID);
|
||||||
|
return new Index(index.get(0), index_uuid.get(0));
|
||||||
}
|
}
|
||||||
|
|
||||||
return null;
|
return null;
|
||||||
@ -651,22 +655,28 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
|
|||||||
public void setIndex(Index index) {
|
public void setIndex(Index index) {
|
||||||
if (index != null) {
|
if (index != null) {
|
||||||
addHeader(INDEX_HEADER_KEY, index.getName());
|
addHeader(INDEX_HEADER_KEY, index.getName());
|
||||||
|
addHeader(INDEX_HEADER_KEY_UUID, index.getUUID());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
public void setIndex(String index) {
|
public void setIndex(String index) {
|
||||||
if (index != null) {
|
if (index != null) {
|
||||||
addHeader(INDEX_HEADER_KEY, index);
|
setIndex(new Index(index, INDEX_UUID_NA_VALUE));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
public void setShard(ShardId shardId) {
|
public void setShard(ShardId shardId) {
|
||||||
if (shardId != null) {
|
if (shardId != null) {
|
||||||
addHeader(INDEX_HEADER_KEY, shardId.getIndex());
|
setIndex(shardId.getIndex());
|
||||||
addHeader(SHARD_HEADER_KEY, Integer.toString(shardId.id()));
|
addHeader(SHARD_HEADER_KEY, Integer.toString(shardId.id()));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public void setShard(String index, int shardId) {
|
||||||
|
setIndex(index);
|
||||||
|
addHeader(SHARD_HEADER_KEY, Integer.toString(shardId));
|
||||||
|
}
|
||||||
|
|
||||||
public void setResources(String type, String... id) {
|
public void setResources(String type, String... id) {
|
||||||
assert type != null;
|
assert type != null;
|
||||||
addHeader(RESOURCE_HEADER_ID_KEY, id);
|
addHeader(RESOURCE_HEADER_ID_KEY, id);
|
||||||
@ -691,7 +701,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
|
|||||||
final ElasticsearchException[] rootCauses = ElasticsearchException.guessRootCauses(t);
|
final ElasticsearchException[] rootCauses = ElasticsearchException.guessRootCauses(t);
|
||||||
builder.field("root_cause");
|
builder.field("root_cause");
|
||||||
builder.startArray();
|
builder.startArray();
|
||||||
for (ElasticsearchException rootCause : rootCauses){
|
for (ElasticsearchException rootCause : rootCauses) {
|
||||||
builder.startObject();
|
builder.startObject();
|
||||||
rootCause.toXContent(builder, new ToXContent.DelegatingMapParams(Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_CAUSE, "true"), params));
|
rootCause.toXContent(builder, new ToXContent.DelegatingMapParams(Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_CAUSE, "true"), params));
|
||||||
builder.endObject();
|
builder.endObject();
|
||||||
|
@ -26,6 +26,7 @@ import org.elasticsearch.action.ShardOperationFailedException;
|
|||||||
import org.elasticsearch.common.Nullable;
|
import org.elasticsearch.common.Nullable;
|
||||||
import org.elasticsearch.common.logging.ESLogger;
|
import org.elasticsearch.common.logging.ESLogger;
|
||||||
import org.elasticsearch.common.logging.Loggers;
|
import org.elasticsearch.common.logging.Loggers;
|
||||||
|
import org.elasticsearch.index.Index;
|
||||||
import org.elasticsearch.rest.RestStatus;
|
import org.elasticsearch.rest.RestStatus;
|
||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
@ -243,7 +244,12 @@ public final class ExceptionsHelper {
|
|||||||
|
|
||||||
public GroupBy(Throwable t) {
|
public GroupBy(Throwable t) {
|
||||||
if (t instanceof ElasticsearchException) {
|
if (t instanceof ElasticsearchException) {
|
||||||
index = ((ElasticsearchException) t).getIndex();
|
final Index index = ((ElasticsearchException) t).getIndex();
|
||||||
|
if (index != null) {
|
||||||
|
this.index = index.getName();
|
||||||
|
} else {
|
||||||
|
this.index = null;
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
index = null;
|
index = null;
|
||||||
}
|
}
|
||||||
|
@ -32,10 +32,6 @@ public abstract class ActionRequest<Request extends ActionRequest<Request>> exte
|
|||||||
|
|
||||||
public ActionRequest() {
|
public ActionRequest() {
|
||||||
super();
|
super();
|
||||||
}
|
|
||||||
|
|
||||||
protected ActionRequest(ActionRequest<?> request) {
|
|
||||||
super(request);
|
|
||||||
// this does not set the listenerThreaded API, if needed, its up to the caller to set it
|
// this does not set the listenerThreaded API, if needed, its up to the caller to set it
|
||||||
// since most times, we actually want it to not be threaded...
|
// since most times, we actually want it to not be threaded...
|
||||||
// this.listenerThreaded = request.listenerThreaded();
|
// this.listenerThreaded = request.listenerThreaded();
|
||||||
|
@ -49,12 +49,6 @@ public abstract class ActionRequestBuilder<Request extends ActionRequest, Respon
|
|||||||
return this.request;
|
return this.request;
|
||||||
}
|
}
|
||||||
|
|
||||||
@SuppressWarnings("unchecked")
|
|
||||||
public final RequestBuilder putHeader(String key, Object value) {
|
|
||||||
request.putHeader(key, value);
|
|
||||||
return (RequestBuilder) this;
|
|
||||||
}
|
|
||||||
|
|
||||||
public ListenableActionFuture<Response> execute() {
|
public ListenableActionFuture<Response> execute() {
|
||||||
PlainListenableActionFuture<Response> future = new PlainListenableActionFuture<>(threadPool);
|
PlainListenableActionFuture<Response> future = new PlainListenableActionFuture<>(threadPool);
|
||||||
execute(future);
|
execute(future);
|
||||||
|
@ -53,7 +53,7 @@ public abstract class DocWriteResponse extends ReplicationResponse implements St
|
|||||||
* The index the document was changed in.
|
* The index the document was changed in.
|
||||||
*/
|
*/
|
||||||
public String getIndex() {
|
public String getIndex() {
|
||||||
return this.shardId.getIndex();
|
return this.shardId.getIndexName();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -119,7 +119,7 @@ public abstract class DocWriteResponse extends ReplicationResponse implements St
|
|||||||
@Override
|
@Override
|
||||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||||
ReplicationResponse.ShardInfo shardInfo = getShardInfo();
|
ReplicationResponse.ShardInfo shardInfo = getShardInfo();
|
||||||
builder.field(Fields._INDEX, shardId.getIndex())
|
builder.field(Fields._INDEX, shardId.getIndexName())
|
||||||
.field(Fields._TYPE, type)
|
.field(Fields._TYPE, type)
|
||||||
.field(Fields._ID, id)
|
.field(Fields._ID, id)
|
||||||
.field(Fields._VERSION, version);
|
.field(Fields._VERSION, version);
|
||||||
|
@ -29,6 +29,7 @@ import org.elasticsearch.common.io.stream.Streamable;
|
|||||||
import org.elasticsearch.common.xcontent.ToXContent;
|
import org.elasticsearch.common.xcontent.ToXContent;
|
||||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||||
import org.elasticsearch.common.xcontent.XContentBuilderString;
|
import org.elasticsearch.common.xcontent.XContentBuilderString;
|
||||||
|
import org.elasticsearch.index.shard.ShardId;
|
||||||
import org.elasticsearch.rest.RestStatus;
|
import org.elasticsearch.rest.RestStatus;
|
||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
@ -169,15 +170,13 @@ public class ReplicationResponse extends ActionResponse {
|
|||||||
|
|
||||||
public static class Failure implements ShardOperationFailedException, ToXContent {
|
public static class Failure implements ShardOperationFailedException, ToXContent {
|
||||||
|
|
||||||
private String index;
|
private ShardId shardId;
|
||||||
private int shardId;
|
|
||||||
private String nodeId;
|
private String nodeId;
|
||||||
private Throwable cause;
|
private Throwable cause;
|
||||||
private RestStatus status;
|
private RestStatus status;
|
||||||
private boolean primary;
|
private boolean primary;
|
||||||
|
|
||||||
public Failure(String index, int shardId, @Nullable String nodeId, Throwable cause, RestStatus status, boolean primary) {
|
public Failure(ShardId shardId, @Nullable String nodeId, Throwable cause, RestStatus status, boolean primary) {
|
||||||
this.index = index;
|
|
||||||
this.shardId = shardId;
|
this.shardId = shardId;
|
||||||
this.nodeId = nodeId;
|
this.nodeId = nodeId;
|
||||||
this.cause = cause;
|
this.cause = cause;
|
||||||
@ -193,7 +192,7 @@ public class ReplicationResponse extends ActionResponse {
|
|||||||
*/
|
*/
|
||||||
@Override
|
@Override
|
||||||
public String index() {
|
public String index() {
|
||||||
return index;
|
return shardId.getIndexName();
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -201,6 +200,10 @@ public class ReplicationResponse extends ActionResponse {
|
|||||||
*/
|
*/
|
||||||
@Override
|
@Override
|
||||||
public int shardId() {
|
public int shardId() {
|
||||||
|
return shardId.id();
|
||||||
|
}
|
||||||
|
|
||||||
|
public ShardId fullShardId() {
|
||||||
return shardId;
|
return shardId;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -243,8 +246,7 @@ public class ReplicationResponse extends ActionResponse {
|
|||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void readFrom(StreamInput in) throws IOException {
|
public void readFrom(StreamInput in) throws IOException {
|
||||||
index = in.readString();
|
shardId = ShardId.readShardId(in);
|
||||||
shardId = in.readVInt();
|
|
||||||
nodeId = in.readOptionalString();
|
nodeId = in.readOptionalString();
|
||||||
cause = in.readThrowable();
|
cause = in.readThrowable();
|
||||||
status = RestStatus.readFrom(in);
|
status = RestStatus.readFrom(in);
|
||||||
@ -253,8 +255,7 @@ public class ReplicationResponse extends ActionResponse {
|
|||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void writeTo(StreamOutput out) throws IOException {
|
public void writeTo(StreamOutput out) throws IOException {
|
||||||
out.writeString(index);
|
shardId.writeTo(out);
|
||||||
out.writeVInt(shardId);
|
|
||||||
out.writeOptionalString(nodeId);
|
out.writeOptionalString(nodeId);
|
||||||
out.writeThrowable(cause);
|
out.writeThrowable(cause);
|
||||||
RestStatus.writeTo(out, status);
|
RestStatus.writeTo(out, status);
|
||||||
@ -264,8 +265,8 @@ public class ReplicationResponse extends ActionResponse {
|
|||||||
@Override
|
@Override
|
||||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||||
builder.startObject();
|
builder.startObject();
|
||||||
builder.field(Fields._INDEX, index);
|
builder.field(Fields._INDEX, shardId.getIndexName());
|
||||||
builder.field(Fields._SHARD, shardId);
|
builder.field(Fields._SHARD, shardId.id());
|
||||||
builder.field(Fields._NODE, nodeId);
|
builder.field(Fields._NODE, nodeId);
|
||||||
builder.field(Fields.REASON);
|
builder.field(Fields.REASON);
|
||||||
builder.startObject();
|
builder.startObject();
|
||||||
|
@ -36,13 +36,19 @@ public class UnavailableShardsException extends ElasticsearchException {
|
|||||||
super(buildMessage(shardId, message), args);
|
super(buildMessage(shardId, message), args);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public UnavailableShardsException(String index, int shardId, String message, Object... args) {
|
||||||
|
super(buildMessage(index, shardId, message), args);
|
||||||
|
}
|
||||||
|
|
||||||
private static String buildMessage(ShardId shardId, String message) {
|
private static String buildMessage(ShardId shardId, String message) {
|
||||||
if (shardId == null) {
|
if (shardId == null) {
|
||||||
return message;
|
return message;
|
||||||
}
|
}
|
||||||
return "[" + shardId.index().name() + "][" + shardId.id() + "] " + message;
|
return buildMessage(shardId.getIndexName(), shardId.id(), message);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private static String buildMessage(String index, int shardId, String message) {return "[" + index + "][" + shardId + "] " + message;}
|
||||||
|
|
||||||
public UnavailableShardsException(StreamInput in) throws IOException {
|
public UnavailableShardsException(StreamInput in) throws IOException {
|
||||||
super(in);
|
super(in);
|
||||||
}
|
}
|
||||||
|
@ -141,7 +141,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
|
|||||||
}
|
}
|
||||||
|
|
||||||
assert waitFor >= 0;
|
assert waitFor >= 0;
|
||||||
final ClusterStateObserver observer = new ClusterStateObserver(clusterService, logger);
|
final ClusterStateObserver observer = new ClusterStateObserver(clusterService, logger, threadPool.getThreadContext());
|
||||||
final ClusterState state = observer.observedState();
|
final ClusterState state = observer.observedState();
|
||||||
if (waitFor == 0 || request.timeout().millis() == 0) {
|
if (waitFor == 0 || request.timeout().millis() == 0) {
|
||||||
listener.onResponse(getResponse(request, state, waitFor, request.timeout().millis() == 0));
|
listener.onResponse(getResponse(request, state, waitFor, request.timeout().millis() == 0));
|
||||||
|
@ -102,7 +102,7 @@ public class TransportNodesHotThreadsAction extends TransportNodesAction<NodesHo
|
|||||||
}
|
}
|
||||||
|
|
||||||
NodeRequest(String nodeId, NodesHotThreadsRequest request) {
|
NodeRequest(String nodeId, NodesHotThreadsRequest request) {
|
||||||
super(request, nodeId);
|
super(nodeId);
|
||||||
this.request = request;
|
this.request = request;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -96,7 +96,7 @@ public class TransportNodesInfoAction extends TransportNodesAction<NodesInfoRequ
|
|||||||
}
|
}
|
||||||
|
|
||||||
NodeInfoRequest(String nodeId, NodesInfoRequest request) {
|
NodeInfoRequest(String nodeId, NodesInfoRequest request) {
|
||||||
super(request, nodeId);
|
super(nodeId);
|
||||||
this.request = request;
|
this.request = request;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -96,7 +96,7 @@ public class TransportNodesStatsAction extends TransportNodesAction<NodesStatsRe
|
|||||||
}
|
}
|
||||||
|
|
||||||
NodeStatsRequest(String nodeId, NodesStatsRequest request) {
|
NodeStatsRequest(String nodeId, NodesStatsRequest request) {
|
||||||
super(request, nodeId);
|
super(nodeId);
|
||||||
this.request = request;
|
this.request = request;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -25,6 +25,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
|
|||||||
import org.elasticsearch.common.io.stream.Streamable;
|
import org.elasticsearch.common.io.stream.Streamable;
|
||||||
import org.elasticsearch.common.xcontent.ToXContent;
|
import org.elasticsearch.common.xcontent.ToXContent;
|
||||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||||
|
import org.elasticsearch.index.Index;
|
||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
|
|
||||||
@ -32,7 +33,7 @@ import java.io.IOException;
|
|||||||
*/
|
*/
|
||||||
public class ClusterSearchShardsGroup implements Streamable, ToXContent {
|
public class ClusterSearchShardsGroup implements Streamable, ToXContent {
|
||||||
|
|
||||||
private String index;
|
private Index index;
|
||||||
private int shardId;
|
private int shardId;
|
||||||
ShardRouting[] shards;
|
ShardRouting[] shards;
|
||||||
|
|
||||||
@ -40,7 +41,7 @@ public class ClusterSearchShardsGroup implements Streamable, ToXContent {
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
public ClusterSearchShardsGroup(String index, int shardId, ShardRouting[] shards) {
|
public ClusterSearchShardsGroup(Index index, int shardId, ShardRouting[] shards) {
|
||||||
this.index = index;
|
this.index = index;
|
||||||
this.shardId = shardId;
|
this.shardId = shardId;
|
||||||
this.shards = shards;
|
this.shards = shards;
|
||||||
@ -53,7 +54,7 @@ public class ClusterSearchShardsGroup implements Streamable, ToXContent {
|
|||||||
}
|
}
|
||||||
|
|
||||||
public String getIndex() {
|
public String getIndex() {
|
||||||
return index;
|
return index.getName();
|
||||||
}
|
}
|
||||||
|
|
||||||
public int getShardId() {
|
public int getShardId() {
|
||||||
@ -66,7 +67,7 @@ public class ClusterSearchShardsGroup implements Streamable, ToXContent {
|
|||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void readFrom(StreamInput in) throws IOException {
|
public void readFrom(StreamInput in) throws IOException {
|
||||||
index = in.readString();
|
index = Index.readIndex(in);
|
||||||
shardId = in.readVInt();
|
shardId = in.readVInt();
|
||||||
shards = new ShardRouting[in.readVInt()];
|
shards = new ShardRouting[in.readVInt()];
|
||||||
for (int i = 0; i < shards.length; i++) {
|
for (int i = 0; i < shards.length; i++) {
|
||||||
@ -76,7 +77,7 @@ public class ClusterSearchShardsGroup implements Streamable, ToXContent {
|
|||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void writeTo(StreamOutput out) throws IOException {
|
public void writeTo(StreamOutput out) throws IOException {
|
||||||
out.writeString(index);
|
index.writeTo(out);
|
||||||
out.writeVInt(shardId);
|
out.writeVInt(shardId);
|
||||||
out.writeVInt(shards.length);
|
out.writeVInt(shards.length);
|
||||||
for (ShardRouting shardRouting : shards) {
|
for (ShardRouting shardRouting : shards) {
|
||||||
|
@ -33,6 +33,7 @@ import org.elasticsearch.cluster.routing.ShardIterator;
|
|||||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||||
import org.elasticsearch.common.inject.Inject;
|
import org.elasticsearch.common.inject.Inject;
|
||||||
import org.elasticsearch.common.settings.Settings;
|
import org.elasticsearch.common.settings.Settings;
|
||||||
|
import org.elasticsearch.index.Index;
|
||||||
import org.elasticsearch.threadpool.ThreadPool;
|
import org.elasticsearch.threadpool.ThreadPool;
|
||||||
import org.elasticsearch.transport.TransportService;
|
import org.elasticsearch.transport.TransportService;
|
||||||
|
|
||||||
@ -77,7 +78,7 @@ public class TransportClusterSearchShardsAction extends TransportMasterNodeReadA
|
|||||||
ClusterSearchShardsGroup[] groupResponses = new ClusterSearchShardsGroup[groupShardsIterator.size()];
|
ClusterSearchShardsGroup[] groupResponses = new ClusterSearchShardsGroup[groupShardsIterator.size()];
|
||||||
int currentGroup = 0;
|
int currentGroup = 0;
|
||||||
for (ShardIterator shardIt : groupShardsIterator) {
|
for (ShardIterator shardIt : groupShardsIterator) {
|
||||||
String index = shardIt.shardId().getIndex();
|
Index index = shardIt.shardId().getIndex();
|
||||||
int shardId = shardIt.shardId().getId();
|
int shardId = shardIt.shardId().getId();
|
||||||
ShardRouting[] shardRoutings = new ShardRouting[shardIt.size()];
|
ShardRouting[] shardRoutings = new ShardRouting[shardIt.size()];
|
||||||
int currentShard = 0;
|
int currentShard = 0;
|
||||||
|
@ -20,7 +20,6 @@
|
|||||||
package org.elasticsearch.action.admin.cluster.snapshots.status;
|
package org.elasticsearch.action.admin.cluster.snapshots.status;
|
||||||
|
|
||||||
import org.elasticsearch.ElasticsearchException;
|
import org.elasticsearch.ElasticsearchException;
|
||||||
import org.elasticsearch.action.ActionRequest;
|
|
||||||
import org.elasticsearch.action.FailedNodeException;
|
import org.elasticsearch.action.FailedNodeException;
|
||||||
import org.elasticsearch.action.support.ActionFilters;
|
import org.elasticsearch.action.support.ActionFilters;
|
||||||
import org.elasticsearch.action.support.nodes.BaseNodeRequest;
|
import org.elasticsearch.action.support.nodes.BaseNodeRequest;
|
||||||
@ -146,8 +145,8 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction<Transpor
|
|||||||
public Request() {
|
public Request() {
|
||||||
}
|
}
|
||||||
|
|
||||||
public Request(ActionRequest<?> request, String[] nodesIds) {
|
public Request(String[] nodesIds) {
|
||||||
super(request, nodesIds);
|
super(nodesIds);
|
||||||
}
|
}
|
||||||
|
|
||||||
public Request snapshotIds(SnapshotId[] snapshotIds) {
|
public Request snapshotIds(SnapshotId[] snapshotIds) {
|
||||||
@ -214,7 +213,7 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction<Transpor
|
|||||||
}
|
}
|
||||||
|
|
||||||
NodeRequest(String nodeId, TransportNodesSnapshotsStatus.Request request) {
|
NodeRequest(String nodeId, TransportNodesSnapshotsStatus.Request request) {
|
||||||
super(request, nodeId);
|
super(nodeId);
|
||||||
snapshotIds = request.snapshotIds;
|
snapshotIds = request.snapshotIds;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -110,7 +110,7 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction<Sn
|
|||||||
snapshotIds[i] = currentSnapshots.get(i).snapshotId();
|
snapshotIds[i] = currentSnapshots.get(i).snapshotId();
|
||||||
}
|
}
|
||||||
|
|
||||||
TransportNodesSnapshotsStatus.Request nodesRequest = new TransportNodesSnapshotsStatus.Request(request, nodesIds.toArray(new String[nodesIds.size()]))
|
TransportNodesSnapshotsStatus.Request nodesRequest = new TransportNodesSnapshotsStatus.Request(nodesIds.toArray(new String[nodesIds.size()]))
|
||||||
.snapshotIds(snapshotIds).timeout(request.masterNodeTimeout());
|
.snapshotIds(snapshotIds).timeout(request.masterNodeTimeout());
|
||||||
transportNodesSnapshotsStatus.execute(nodesRequest, new ActionListener<TransportNodesSnapshotsStatus.NodesSnapshotStatus>() {
|
transportNodesSnapshotsStatus.execute(nodesRequest, new ActionListener<TransportNodesSnapshotsStatus.NodesSnapshotStatus>() {
|
||||||
@Override
|
@Override
|
||||||
|
@ -66,10 +66,10 @@ public class ClusterStatsIndices implements ToXContent, Streamable {
|
|||||||
|
|
||||||
for (ClusterStatsNodeResponse r : nodeResponses) {
|
for (ClusterStatsNodeResponse r : nodeResponses) {
|
||||||
for (org.elasticsearch.action.admin.indices.stats.ShardStats shardStats : r.shardsStats()) {
|
for (org.elasticsearch.action.admin.indices.stats.ShardStats shardStats : r.shardsStats()) {
|
||||||
ShardStats indexShardStats = countsPerIndex.get(shardStats.getShardRouting().getIndex());
|
ShardStats indexShardStats = countsPerIndex.get(shardStats.getShardRouting().getIndexName());
|
||||||
if (indexShardStats == null) {
|
if (indexShardStats == null) {
|
||||||
indexShardStats = new ShardStats();
|
indexShardStats = new ShardStats();
|
||||||
countsPerIndex.put(shardStats.getShardRouting().getIndex(), indexShardStats);
|
countsPerIndex.put(shardStats.getShardRouting().getIndexName(), indexShardStats);
|
||||||
}
|
}
|
||||||
|
|
||||||
indexShardStats.total++;
|
indexShardStats.total++;
|
||||||
|
@ -132,7 +132,7 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta
|
|||||||
}
|
}
|
||||||
|
|
||||||
ClusterStatsNodeRequest(String nodeId, ClusterStatsRequest request) {
|
ClusterStatsNodeRequest(String nodeId, ClusterStatsRequest request) {
|
||||||
super(request, nodeId);
|
super(nodeId);
|
||||||
this.request = request;
|
this.request = request;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -57,7 +57,7 @@ public class TransportRenderSearchTemplateAction extends HandledTransportAction<
|
|||||||
|
|
||||||
@Override
|
@Override
|
||||||
protected void doRun() throws Exception {
|
protected void doRun() throws Exception {
|
||||||
ExecutableScript executable = scriptService.executable(request.template(), ScriptContext.Standard.SEARCH, request, Collections.emptyMap());
|
ExecutableScript executable = scriptService.executable(request.template(), ScriptContext.Standard.SEARCH, Collections.emptyMap());
|
||||||
BytesReference processedTemplate = (BytesReference) executable.run();
|
BytesReference processedTemplate = (BytesReference) executable.run();
|
||||||
RenderSearchTemplateResponse response = new RenderSearchTemplateResponse();
|
RenderSearchTemplateResponse response = new RenderSearchTemplateResponse();
|
||||||
response.source(processedTemplate);
|
response.source(processedTemplate);
|
||||||
|
@ -81,7 +81,7 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAc
|
|||||||
|
|
||||||
@Override
|
@Override
|
||||||
protected EmptyResult shardOperation(ClearIndicesCacheRequest request, ShardRouting shardRouting) {
|
protected EmptyResult shardOperation(ClearIndicesCacheRequest request, ShardRouting shardRouting) {
|
||||||
IndexService service = indicesService.indexService(shardRouting.getIndex());
|
IndexService service = indicesService.indexService(shardRouting.getIndexName());
|
||||||
if (service != null) {
|
if (service != null) {
|
||||||
IndexShard shard = service.getShardOrNull(shardRouting.id());
|
IndexShard shard = service.getShardOrNull(shardRouting.id());
|
||||||
boolean clearedAtLeastOne = false;
|
boolean clearedAtLeastOne = false;
|
||||||
|
@ -81,14 +81,6 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
|
|||||||
public CreateIndexRequest() {
|
public CreateIndexRequest() {
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* Constructs a new request to create an index that was triggered by a different request,
|
|
||||||
* provided as an argument so that its headers and context can be copied to the new request.
|
|
||||||
*/
|
|
||||||
public CreateIndexRequest(ActionRequest request) {
|
|
||||||
super(request);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Constructs a new request to create an index with the specified name.
|
* Constructs a new request to create an index with the specified name.
|
||||||
*/
|
*/
|
||||||
|
@ -42,17 +42,6 @@ public class FlushRequest extends BroadcastRequest<FlushRequest> {
|
|||||||
private boolean force = false;
|
private boolean force = false;
|
||||||
private boolean waitIfOngoing = false;
|
private boolean waitIfOngoing = false;
|
||||||
|
|
||||||
public FlushRequest() {
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Copy constructor that creates a new flush request that is a copy of the one provided as an argument.
|
|
||||||
* The new request will inherit though headers and context from the original request that caused it.
|
|
||||||
*/
|
|
||||||
public FlushRequest(ActionRequest originalRequest) {
|
|
||||||
super(originalRequest);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Constructs a new flush request against one or more indices. If nothing is provided, all indices will
|
* Constructs a new flush request against one or more indices. If nothing is provided, all indices will
|
||||||
* be flushed.
|
* be flushed.
|
||||||
|
@ -31,7 +31,7 @@ public class ShardFlushRequest extends ReplicationRequest<ShardFlushRequest> {
|
|||||||
private FlushRequest request = new FlushRequest();
|
private FlushRequest request = new FlushRequest();
|
||||||
|
|
||||||
public ShardFlushRequest(FlushRequest request, ShardId shardId) {
|
public ShardFlushRequest(FlushRequest request, ShardId shardId) {
|
||||||
super(request, shardId);
|
super(shardId);
|
||||||
this.request = request;
|
this.request = request;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -36,17 +36,6 @@ import java.util.Arrays;
|
|||||||
*/
|
*/
|
||||||
public class SyncedFlushRequest extends BroadcastRequest<SyncedFlushRequest> {
|
public class SyncedFlushRequest extends BroadcastRequest<SyncedFlushRequest> {
|
||||||
|
|
||||||
public SyncedFlushRequest() {
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Copy constructor that creates a new synced flush request that is a copy of the one provided as an argument.
|
|
||||||
* The new request will inherit though headers and context from the original request that caused it.
|
|
||||||
*/
|
|
||||||
public SyncedFlushRequest(ActionRequest originalRequest) {
|
|
||||||
super(originalRequest);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Constructs a new synced flush request against one or more indices. If nothing is provided, all indices will
|
* Constructs a new synced flush request against one or more indices. If nothing is provided, all indices will
|
||||||
* be sync flushed.
|
* be sync flushed.
|
||||||
|
@ -42,7 +42,6 @@ public class GetFieldMappingsIndexRequest extends SingleShardRequest<GetFieldMap
|
|||||||
}
|
}
|
||||||
|
|
||||||
GetFieldMappingsIndexRequest(GetFieldMappingsRequest other, String index, boolean probablySingleFieldRequest) {
|
GetFieldMappingsIndexRequest(GetFieldMappingsRequest other, String index, boolean probablySingleFieldRequest) {
|
||||||
super(other);
|
|
||||||
this.probablySingleFieldRequest = probablySingleFieldRequest;
|
this.probablySingleFieldRequest = probablySingleFieldRequest;
|
||||||
this.includeDefaults = other.includeDefaults();
|
this.includeDefaults = other.includeDefaults();
|
||||||
this.types = other.types();
|
this.types = other.types();
|
||||||
|
@ -102,7 +102,7 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleShardAc
|
|||||||
.filter(type -> Regex.simpleMatch(request.types(), type))
|
.filter(type -> Regex.simpleMatch(request.types(), type))
|
||||||
.collect(Collectors.toCollection(ArrayList::new));
|
.collect(Collectors.toCollection(ArrayList::new));
|
||||||
if (typeIntersection.isEmpty()) {
|
if (typeIntersection.isEmpty()) {
|
||||||
throw new TypeMissingException(shardId.index(), request.types());
|
throw new TypeMissingException(shardId.getIndex(), request.types());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -115,7 +115,7 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleShardAc
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return new GetFieldMappingsResponse(singletonMap(shardId.getIndex(), typeMappings.immutableMap()));
|
return new GetFieldMappingsResponse(singletonMap(shardId.getIndexName(), typeMappings.immutableMap()));
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
|
@ -75,7 +75,7 @@ public class TransportRecoveryAction extends TransportBroadcastByNodeAction<Reco
|
|||||||
if (recoveryState == null) {
|
if (recoveryState == null) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
String indexName = recoveryState.getShardId().getIndex();
|
String indexName = recoveryState.getShardId().getIndexName();
|
||||||
if (!shardResponses.containsKey(indexName)) {
|
if (!shardResponses.containsKey(indexName)) {
|
||||||
shardResponses.put(indexName, new ArrayList<>());
|
shardResponses.put(indexName, new ArrayList<>());
|
||||||
}
|
}
|
||||||
|
@ -33,17 +33,6 @@ import org.elasticsearch.action.support.broadcast.BroadcastRequest;
|
|||||||
*/
|
*/
|
||||||
public class RefreshRequest extends BroadcastRequest<RefreshRequest> {
|
public class RefreshRequest extends BroadcastRequest<RefreshRequest> {
|
||||||
|
|
||||||
public RefreshRequest() {
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Copy constructor that creates a new refresh request that is a copy of the one provided as an argument.
|
|
||||||
* The new request will inherit though headers and context from the original request that caused it.
|
|
||||||
*/
|
|
||||||
public RefreshRequest(ActionRequest originalRequest) {
|
|
||||||
super(originalRequest);
|
|
||||||
}
|
|
||||||
|
|
||||||
public RefreshRequest(String... indices) {
|
public RefreshRequest(String... indices) {
|
||||||
super(indices);
|
super(indices);
|
||||||
}
|
}
|
||||||
|
@ -54,7 +54,7 @@ public class TransportRefreshAction extends TransportBroadcastReplicationAction<
|
|||||||
|
|
||||||
@Override
|
@Override
|
||||||
protected BasicReplicationRequest newShardRequest(RefreshRequest request, ShardId shardId) {
|
protected BasicReplicationRequest newShardRequest(RefreshRequest request, ShardId shardId) {
|
||||||
return new BasicReplicationRequest(request, shardId);
|
return new BasicReplicationRequest(shardId);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
|
@ -62,17 +62,17 @@ public class IndicesSegmentResponse extends BroadcastResponse implements ToXCont
|
|||||||
|
|
||||||
Set<String> indices = new HashSet<>();
|
Set<String> indices = new HashSet<>();
|
||||||
for (ShardSegments shard : shards) {
|
for (ShardSegments shard : shards) {
|
||||||
indices.add(shard.getShardRouting().getIndex());
|
indices.add(shard.getShardRouting().getIndexName());
|
||||||
}
|
}
|
||||||
|
|
||||||
for (String index : indices) {
|
for (String indexName : indices) {
|
||||||
List<ShardSegments> shards = new ArrayList<>();
|
List<ShardSegments> shards = new ArrayList<>();
|
||||||
for (ShardSegments shard : this.shards) {
|
for (ShardSegments shard : this.shards) {
|
||||||
if (shard.getShardRouting().index().equals(index)) {
|
if (shard.getShardRouting().getIndexName().equals(indexName)) {
|
||||||
shards.add(shard);
|
shards.add(shard);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
indicesSegments.put(index, new IndexSegments(index, shards.toArray(new ShardSegments[shards.size()])));
|
indicesSegments.put(indexName, new IndexSegments(indexName, shards.toArray(new ShardSegments[shards.size()])));
|
||||||
}
|
}
|
||||||
this.indicesSegments = indicesSegments;
|
this.indicesSegments = indicesSegments;
|
||||||
return indicesSegments;
|
return indicesSegments;
|
||||||
|
@ -93,7 +93,7 @@ public class TransportIndicesSegmentsAction extends TransportBroadcastByNodeActi
|
|||||||
|
|
||||||
@Override
|
@Override
|
||||||
protected ShardSegments shardOperation(IndicesSegmentsRequest request, ShardRouting shardRouting) {
|
protected ShardSegments shardOperation(IndicesSegmentsRequest request, ShardRouting shardRouting) {
|
||||||
IndexService indexService = indicesService.indexServiceSafe(shardRouting.getIndex());
|
IndexService indexService = indicesService.indexServiceSafe(shardRouting.getIndexName());
|
||||||
IndexShard indexShard = indexService.getShard(shardRouting.id());
|
IndexShard indexShard = indexService.getShard(shardRouting.id());
|
||||||
return new ShardSegments(indexShard.routingEntry(), indexShard.segments(request.verbose()));
|
return new ShardSegments(indexShard.routingEntry(), indexShard.segments(request.verbose()));
|
||||||
}
|
}
|
||||||
|
@ -166,7 +166,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
|
|||||||
ImmutableOpenMap.Builder<String, ImmutableOpenIntMap<java.util.List<IndicesShardStoresResponse.StoreStatus>>> indicesStoreStatusesBuilder = ImmutableOpenMap.builder();
|
ImmutableOpenMap.Builder<String, ImmutableOpenIntMap<java.util.List<IndicesShardStoresResponse.StoreStatus>>> indicesStoreStatusesBuilder = ImmutableOpenMap.builder();
|
||||||
java.util.List<IndicesShardStoresResponse.Failure> failureBuilder = new ArrayList<>();
|
java.util.List<IndicesShardStoresResponse.Failure> failureBuilder = new ArrayList<>();
|
||||||
for (Response fetchResponse : fetchResponses) {
|
for (Response fetchResponse : fetchResponses) {
|
||||||
ImmutableOpenIntMap<java.util.List<IndicesShardStoresResponse.StoreStatus>> indexStoreStatuses = indicesStoreStatusesBuilder.get(fetchResponse.shardId.getIndex());
|
ImmutableOpenIntMap<java.util.List<IndicesShardStoresResponse.StoreStatus>> indexStoreStatuses = indicesStoreStatusesBuilder.get(fetchResponse.shardId.getIndexName());
|
||||||
final ImmutableOpenIntMap.Builder<java.util.List<IndicesShardStoresResponse.StoreStatus>> indexShardsBuilder;
|
final ImmutableOpenIntMap.Builder<java.util.List<IndicesShardStoresResponse.StoreStatus>> indexShardsBuilder;
|
||||||
if (indexStoreStatuses == null) {
|
if (indexStoreStatuses == null) {
|
||||||
indexShardsBuilder = ImmutableOpenIntMap.builder();
|
indexShardsBuilder = ImmutableOpenIntMap.builder();
|
||||||
@ -179,15 +179,15 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
|
|||||||
}
|
}
|
||||||
for (NodeGatewayStartedShards response : fetchResponse.responses) {
|
for (NodeGatewayStartedShards response : fetchResponse.responses) {
|
||||||
if (shardExistsInNode(response)) {
|
if (shardExistsInNode(response)) {
|
||||||
IndicesShardStoresResponse.StoreStatus.AllocationStatus allocationStatus = getAllocationStatus(fetchResponse.shardId.getIndex(), fetchResponse.shardId.id(), response.getNode());
|
IndicesShardStoresResponse.StoreStatus.AllocationStatus allocationStatus = getAllocationStatus(fetchResponse.shardId.getIndexName(), fetchResponse.shardId.id(), response.getNode());
|
||||||
storeStatuses.add(new IndicesShardStoresResponse.StoreStatus(response.getNode(), response.version(), response.allocationId(), allocationStatus, response.storeException()));
|
storeStatuses.add(new IndicesShardStoresResponse.StoreStatus(response.getNode(), response.version(), response.allocationId(), allocationStatus, response.storeException()));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
CollectionUtil.timSort(storeStatuses);
|
CollectionUtil.timSort(storeStatuses);
|
||||||
indexShardsBuilder.put(fetchResponse.shardId.id(), storeStatuses);
|
indexShardsBuilder.put(fetchResponse.shardId.id(), storeStatuses);
|
||||||
indicesStoreStatusesBuilder.put(fetchResponse.shardId.getIndex(), indexShardsBuilder.build());
|
indicesStoreStatusesBuilder.put(fetchResponse.shardId.getIndexName(), indexShardsBuilder.build());
|
||||||
for (FailedNodeException failure : fetchResponse.failures) {
|
for (FailedNodeException failure : fetchResponse.failures) {
|
||||||
failureBuilder.add(new IndicesShardStoresResponse.Failure(failure.nodeId(), fetchResponse.shardId.getIndex(), fetchResponse.shardId.id(), failure.getCause()));
|
failureBuilder.add(new IndicesShardStoresResponse.Failure(failure.nodeId(), fetchResponse.shardId.getIndexName(), fetchResponse.shardId.id(), failure.getCause()));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
listener.onResponse(new IndicesShardStoresResponse(indicesStoreStatusesBuilder.build(), Collections.unmodifiableList(failureBuilder)));
|
listener.onResponse(new IndicesShardStoresResponse(indicesStoreStatusesBuilder.build(), Collections.unmodifiableList(failureBuilder)));
|
||||||
@ -196,7 +196,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
|
|||||||
private IndicesShardStoresResponse.StoreStatus.AllocationStatus getAllocationStatus(String index, int shardID, DiscoveryNode node) {
|
private IndicesShardStoresResponse.StoreStatus.AllocationStatus getAllocationStatus(String index, int shardID, DiscoveryNode node) {
|
||||||
for (ShardRouting shardRouting : routingNodes.node(node.id())) {
|
for (ShardRouting shardRouting : routingNodes.node(node.id())) {
|
||||||
ShardId shardId = shardRouting.shardId();
|
ShardId shardId = shardRouting.shardId();
|
||||||
if (shardId.id() == shardID && shardId.getIndex().equals(index)) {
|
if (shardId.id() == shardID && shardId.getIndexName().equals(index)) {
|
||||||
if (shardRouting.primary()) {
|
if (shardRouting.primary()) {
|
||||||
return IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY;
|
return IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY;
|
||||||
} else if (shardRouting.assignedToNode()) {
|
} else if (shardRouting.assignedToNode()) {
|
||||||
|
@ -89,17 +89,17 @@ public class IndicesStatsResponse extends BroadcastResponse implements ToXConten
|
|||||||
|
|
||||||
Set<String> indices = new HashSet<>();
|
Set<String> indices = new HashSet<>();
|
||||||
for (ShardStats shard : shards) {
|
for (ShardStats shard : shards) {
|
||||||
indices.add(shard.getShardRouting().getIndex());
|
indices.add(shard.getShardRouting().getIndexName());
|
||||||
}
|
}
|
||||||
|
|
||||||
for (String index : indices) {
|
for (String indexName : indices) {
|
||||||
List<ShardStats> shards = new ArrayList<>();
|
List<ShardStats> shards = new ArrayList<>();
|
||||||
for (ShardStats shard : this.shards) {
|
for (ShardStats shard : this.shards) {
|
||||||
if (shard.getShardRouting().index().equals(index)) {
|
if (shard.getShardRouting().getIndexName().equals(indexName)) {
|
||||||
shards.add(shard);
|
shards.add(shard);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
indicesStats.put(index, new IndexStats(index, shards.toArray(new ShardStats[shards.size()])));
|
indicesStats.put(indexName, new IndexStats(indexName, shards.toArray(new ShardStats[shards.size()])));
|
||||||
}
|
}
|
||||||
this.indicesStats = indicesStats;
|
this.indicesStats = indicesStats;
|
||||||
return indicesStats;
|
return indicesStats;
|
||||||
|
@ -59,14 +59,14 @@ public class UpgradeStatusResponse extends BroadcastResponse implements ToXConte
|
|||||||
indices.add(shard.getIndex());
|
indices.add(shard.getIndex());
|
||||||
}
|
}
|
||||||
|
|
||||||
for (String index : indices) {
|
for (String indexName : indices) {
|
||||||
List<ShardUpgradeStatus> shards = new ArrayList<>();
|
List<ShardUpgradeStatus> shards = new ArrayList<>();
|
||||||
for (ShardUpgradeStatus shard : this.shards) {
|
for (ShardUpgradeStatus shard : this.shards) {
|
||||||
if (shard.getShardRouting().index().equals(index)) {
|
if (shard.getShardRouting().getIndexName().equals(indexName)) {
|
||||||
shards.add(shard);
|
shards.add(shard);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
indicesUpgradeStats.put(index, new IndexUpgradeStatus(index, shards.toArray(new ShardUpgradeStatus[shards.size()])));
|
indicesUpgradeStats.put(indexName, new IndexUpgradeStatus(indexName, shards.toArray(new ShardUpgradeStatus[shards.size()])));
|
||||||
}
|
}
|
||||||
this.indicesUpgradeStatus = indicesUpgradeStats;
|
this.indicesUpgradeStatus = indicesUpgradeStats;
|
||||||
return indicesUpgradeStats;
|
return indicesUpgradeStats;
|
||||||
|
@ -41,6 +41,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
|
|||||||
import org.elasticsearch.common.settings.Settings;
|
import org.elasticsearch.common.settings.Settings;
|
||||||
import org.elasticsearch.index.shard.IndexShard;
|
import org.elasticsearch.index.shard.IndexShard;
|
||||||
import org.elasticsearch.indices.IndicesService;
|
import org.elasticsearch.indices.IndicesService;
|
||||||
|
import org.elasticsearch.tasks.Task;
|
||||||
import org.elasticsearch.threadpool.ThreadPool;
|
import org.elasticsearch.threadpool.ThreadPool;
|
||||||
import org.elasticsearch.transport.TransportService;
|
import org.elasticsearch.transport.TransportService;
|
||||||
|
|
||||||
@ -75,7 +76,7 @@ public class TransportUpgradeAction extends TransportBroadcastByNodeAction<Upgra
|
|||||||
Map<String, Tuple<Version, org.apache.lucene.util.Version>> versions = new HashMap<>();
|
Map<String, Tuple<Version, org.apache.lucene.util.Version>> versions = new HashMap<>();
|
||||||
for (ShardUpgradeResult result : shardUpgradeResults) {
|
for (ShardUpgradeResult result : shardUpgradeResults) {
|
||||||
successfulShards++;
|
successfulShards++;
|
||||||
String index = result.getShardId().getIndex();
|
String index = result.getShardId().getIndex().getName();
|
||||||
if (result.primary()) {
|
if (result.primary()) {
|
||||||
Integer count = successfulPrimaryShards.get(index);
|
Integer count = successfulPrimaryShards.get(index);
|
||||||
successfulPrimaryShards.put(index, count == null ? 1 : count + 1);
|
successfulPrimaryShards.put(index, count == null ? 1 : count + 1);
|
||||||
@ -179,7 +180,7 @@ public class TransportUpgradeAction extends TransportBroadcastByNodeAction<Upgra
|
|||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
protected void doExecute(UpgradeRequest request, final ActionListener<UpgradeResponse> listener) {
|
protected void doExecute(Task task, UpgradeRequest request, final ActionListener<UpgradeResponse> listener) {
|
||||||
ActionListener<UpgradeResponse> settingsUpdateListener = new ActionListener<UpgradeResponse>() {
|
ActionListener<UpgradeResponse> settingsUpdateListener = new ActionListener<UpgradeResponse>() {
|
||||||
@Override
|
@Override
|
||||||
public void onResponse(UpgradeResponse upgradeResponse) {
|
public void onResponse(UpgradeResponse upgradeResponse) {
|
||||||
@ -199,7 +200,7 @@ public class TransportUpgradeAction extends TransportBroadcastByNodeAction<Upgra
|
|||||||
listener.onFailure(e);
|
listener.onFailure(e);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
super.doExecute(request, settingsUpdateListener);
|
super.doExecute(task, request, settingsUpdateListener);
|
||||||
}
|
}
|
||||||
|
|
||||||
private void updateSettings(final UpgradeResponse upgradeResponse, final ActionListener<UpgradeResponse> listener) {
|
private void updateSettings(final UpgradeResponse upgradeResponse, final ActionListener<UpgradeResponse> listener) {
|
||||||
|
@ -53,6 +53,7 @@ import org.elasticsearch.search.fetch.FetchPhase;
|
|||||||
import org.elasticsearch.search.internal.DefaultSearchContext;
|
import org.elasticsearch.search.internal.DefaultSearchContext;
|
||||||
import org.elasticsearch.search.internal.SearchContext;
|
import org.elasticsearch.search.internal.SearchContext;
|
||||||
import org.elasticsearch.search.internal.ShardSearchLocalRequest;
|
import org.elasticsearch.search.internal.ShardSearchLocalRequest;
|
||||||
|
import org.elasticsearch.tasks.Task;
|
||||||
import org.elasticsearch.threadpool.ThreadPool;
|
import org.elasticsearch.threadpool.ThreadPool;
|
||||||
import org.elasticsearch.transport.TransportService;
|
import org.elasticsearch.transport.TransportService;
|
||||||
|
|
||||||
@ -93,14 +94,14 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
|
|||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
protected void doExecute(ValidateQueryRequest request, ActionListener<ValidateQueryResponse> listener) {
|
protected void doExecute(Task task, ValidateQueryRequest request, ActionListener<ValidateQueryResponse> listener) {
|
||||||
request.nowInMillis = System.currentTimeMillis();
|
request.nowInMillis = System.currentTimeMillis();
|
||||||
super.doExecute(request, listener);
|
super.doExecute(task, request, listener);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
protected ShardValidateQueryRequest newShardRequest(int numShards, ShardRouting shard, ValidateQueryRequest request) {
|
protected ShardValidateQueryRequest newShardRequest(int numShards, ShardRouting shard, ValidateQueryRequest request) {
|
||||||
String[] filteringAliases = indexNameExpressionResolver.filteringAliases(clusterService.state(), shard.index(), request.indices());
|
String[] filteringAliases = indexNameExpressionResolver.filteringAliases(clusterService.state(), shard.getIndexName(), request.indices());
|
||||||
return new ShardValidateQueryRequest(shard.shardId(), filteringAliases, request);
|
return new ShardValidateQueryRequest(shard.shardId(), filteringAliases, request);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -69,14 +69,6 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
|
|||||||
public BulkRequest() {
|
public BulkRequest() {
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
-* Creates a bulk request caused by some other request, which is provided as an
-* argument so that its headers and context can be copied to the new request
-*/
-public BulkRequest(ActionRequest<?> request) {
-super(request);
-}
-
 /**
 * Adds a list of requests to be executed. Either index or delete requests.
 */
@@ -41,7 +41,7 @@ public class BulkShardRequest extends ReplicationRequest<BulkShardRequest> {
 }

 BulkShardRequest(BulkRequest bulkRequest, ShardId shardId, boolean refresh, BulkItemRequest[] items) {
-super(bulkRequest, shardId);
+super(shardId);
 this.items = items;
 this.refresh = refresh;
 }
@@ -114,7 +114,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
 for (Map.Entry<String, Set<String>> entry : indicesAndTypes.entrySet()) {
 final String index = entry.getKey();
 if (autoCreateIndex.shouldAutoCreate(index, state)) {
-CreateIndexRequest createIndexRequest = new CreateIndexRequest(bulkRequest);
+CreateIndexRequest createIndexRequest = new CreateIndexRequest();
 createIndexRequest.index(index);
 for (String type : entry.getValue()) {
 createIndexRequest.mapping(type);
@@ -377,7 +377,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
 if (unavailableException == null) {
 IndexMetaData indexMetaData = metaData.index(concreteIndex);
 if (indexMetaData.getState() == IndexMetaData.State.CLOSE) {
-unavailableException = new IndexClosedException(new Index(metaData.index(request.index()).getIndex()));
+unavailableException = new IndexClosedException(metaData.index(request.index()).getIndex());
 }
 }
 if (unavailableException != null) {
@@ -92,7 +92,7 @@ public class DeleteRequest extends ReplicationRequest<DeleteRequest> implements
 * The new request will inherit though headers and context from the original request that caused it.
 */
 public DeleteRequest(DeleteRequest request, ActionRequest originalRequest) {
-super(request, originalRequest);
+super(request);
 this.type = request.type();
 this.id = request.id();
 this.routing = request.routing();
@@ -102,14 +102,6 @@ public class DeleteRequest extends ReplicationRequest<DeleteRequest> implements
 this.versionType = request.versionType();
 }

-/**
-* Creates a delete request caused by some other request, which is provided as an
-* argument so that its headers and context can be copied to the new request
-*/
-public DeleteRequest(ActionRequest request) {
-super(request);
-}
-
 @Override
 public ActionRequestValidationException validate() {
 ActionRequestValidationException validationException = super.validate();
@@ -44,6 +44,7 @@ import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.indices.IndexAlreadyExistsException;
 import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.tasks.Task;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;

@@ -69,27 +70,27 @@ public class TransportDeleteAction extends TransportReplicationAction<DeleteRequ
 }

 @Override
-protected void doExecute(final DeleteRequest request, final ActionListener<DeleteResponse> listener) {
+protected void doExecute(Task task, final DeleteRequest request, final ActionListener<DeleteResponse> listener) {
 ClusterState state = clusterService.state();
 if (autoCreateIndex.shouldAutoCreate(request.index(), state)) {
-createIndexAction.execute(new CreateIndexRequest(request).index(request.index()).cause("auto(delete api)").masterNodeTimeout(request.timeout()), new ActionListener<CreateIndexResponse>() {
+createIndexAction.execute(task, new CreateIndexRequest().index(request.index()).cause("auto(delete api)").masterNodeTimeout(request.timeout()), new ActionListener<CreateIndexResponse>() {
 @Override
 public void onResponse(CreateIndexResponse result) {
-innerExecute(request, listener);
+innerExecute(task, request, listener);
 }

 @Override
 public void onFailure(Throwable e) {
 if (ExceptionsHelper.unwrapCause(e) instanceof IndexAlreadyExistsException) {
 // we have the index, do it
-innerExecute(request, listener);
+innerExecute(task, request, listener);
 } else {
 listener.onFailure(e);
 }
 }
 });
 } else {
-innerExecute(request, listener);
+innerExecute(task, request, listener);
 }
 }

@@ -114,8 +115,8 @@ public class TransportDeleteAction extends TransportReplicationAction<DeleteRequ
 request.setShardId(shardId);
 }

-private void innerExecute(final DeleteRequest request, final ActionListener<DeleteResponse> listener) {
-super.doExecute(request, listener);
+private void innerExecute(Task task, final DeleteRequest request, final ActionListener<DeleteResponse> listener) {
+super.doExecute(task, request, listener);
 }

 @Override
@@ -112,7 +112,7 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe
 Term uidTerm = new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(request.type(), request.id()));
 Engine.GetResult result = indexShard.get(new Engine.Get(false, uidTerm));
 if (!result.exists()) {
-return new ExplainResponse(shardId.getIndex(), request.type(), request.id(), false);
+return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), false);
 }

 SearchContext context = new DefaultSearchContext(0,
@@ -135,9 +135,9 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe
 // because we are working in the same searcher in engineGetResult we can be sure that a
 // doc isn't deleted between the initial get and this call.
 GetResult getResult = indexShard.getService().get(result, request.id(), request.type(), request.fields(), request.fetchSourceContext(), false);
-return new ExplainResponse(shardId.getIndex(), request.type(), request.id(), true, explanation, getResult);
+return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), true, explanation, getResult);
 } else {
-return new ExplainResponse(shardId.getIndex(), request.type(), request.id(), true, explanation);
+return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), true, explanation);
 }
 } catch (IOException e) {
 throw new ElasticsearchException("Could not explain", e);
@@ -19,7 +19,6 @@

 package org.elasticsearch.action.get;

-import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.RealtimeRequest;
 import org.elasticsearch.action.ValidateActions;
@@ -72,8 +71,7 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
 * Copy constructor that creates a new get request that is a copy of the one provided as an argument.
 * The new request will inherit though headers and context from the original request that caused it.
 */
-public GetRequest(GetRequest getRequest, ActionRequest originalRequest) {
-super(originalRequest);
+public GetRequest(GetRequest getRequest) {
 this.index = getRequest.index;
 this.type = getRequest.type;
 this.id = getRequest.id;
@@ -98,14 +96,6 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
 this.type = "_all";
 }

-/**
-* Constructs a new get request starting from the provided request, meaning that it will
-* inherit its headers and context, and against the specified index.
-*/
-public GetRequest(ActionRequest request, String index) {
-super(request, index);
-}
-
 /**
 * Constructs a new get request against the specified index with the type and id.
 *
@@ -266,18 +266,6 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I

 List<Item> items = new ArrayList<>();

-public MultiGetRequest() {
-
-}
-
-/**
-* Creates a multi get request caused by some other request, which is provided as an
-* argument so that its headers and context can be copied to the new request
-*/
-public MultiGetRequest(ActionRequest request) {
-super(request);
-}
-
 public List<Item> getItems() {
 return this.items;
 }
@@ -45,7 +45,7 @@ public class MultiGetShardRequest extends SingleShardRequest<MultiGetShardReques
 }

 MultiGetShardRequest(MultiGetRequest multiGetRequest, String index, int shardId) {
-super(multiGetRequest, index);
+super(index);
 this.shardId = shardId;
 locations = new IntArrayList();
 items = new ArrayList<>();
@@ -79,7 +79,7 @@ public class TransportMultiGetAction extends HandledTransportAction<MultiGetRequ
 .getShards(clusterState, concreteSingleIndex, item.type(), item.id(), item.routing(), null).shardId();
 MultiGetShardRequest shardRequest = shardRequests.get(shardId);
 if (shardRequest == null) {
-shardRequest = new MultiGetShardRequest(request, shardId.index().name(), shardId.id());
+shardRequest = new MultiGetShardRequest(request, shardId.getIndexName(), shardId.id());
 shardRequests.put(shardId, shardRequest);
 }
 shardRequest.add(i, item);
@@ -21,7 +21,6 @@ package org.elasticsearch.action.index;

 import org.elasticsearch.ElasticsearchGenerationException;
 import org.elasticsearch.Version;
-import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.DocumentRequest;
 import org.elasticsearch.action.RoutingMissingException;
@@ -160,20 +159,12 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
 public IndexRequest() {
 }

-/**
-* Creates an index request caused by some other request, which is provided as an
-* argument so that its headers and context can be copied to the new request
-*/
-public IndexRequest(ActionRequest request) {
-super(request);
-}
-
 /**
 * Copy constructor that creates a new index request that is a copy of the one provided as an argument.
 * The new request will inherit though headers and context from the original request that caused it.
 */
-public IndexRequest(IndexRequest indexRequest, ActionRequest originalRequest) {
-super(indexRequest, originalRequest);
+public IndexRequest(IndexRequest indexRequest) {
+super(indexRequest);
 this.type = indexRequest.type;
 this.id = indexRequest.id;
 this.routing = indexRequest.routing;
@@ -48,6 +48,7 @@ import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.translog.Translog;
 import org.elasticsearch.indices.IndexAlreadyExistsException;
 import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.tasks.Task;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;

@@ -84,19 +85,19 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
 }

 @Override
-protected void doExecute(final IndexRequest request, final ActionListener<IndexResponse> listener) {
+protected void doExecute(Task task, final IndexRequest request, final ActionListener<IndexResponse> listener) {
 // if we don't have a master, we don't have metadata, that's fine, let it find a master using create index API
 ClusterState state = clusterService.state();
 if (autoCreateIndex.shouldAutoCreate(request.index(), state)) {
-CreateIndexRequest createIndexRequest = new CreateIndexRequest(request);
+CreateIndexRequest createIndexRequest = new CreateIndexRequest();
 createIndexRequest.index(request.index());
 createIndexRequest.mapping(request.type());
 createIndexRequest.cause("auto(index api)");
 createIndexRequest.masterNodeTimeout(request.timeout());
-createIndexAction.execute(createIndexRequest, new ActionListener<CreateIndexResponse>() {
+createIndexAction.execute(task, createIndexRequest, new ActionListener<CreateIndexResponse>() {
 @Override
 public void onResponse(CreateIndexResponse result) {
-innerExecute(request, listener);
+innerExecute(task, request, listener);
 }

 @Override
@@ -104,7 +105,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
 if (ExceptionsHelper.unwrapCause(e) instanceof IndexAlreadyExistsException) {
 // we have the index, do it
 try {
-innerExecute(request, listener);
+innerExecute(task, request, listener);
 } catch (Throwable e1) {
 listener.onFailure(e1);
 }
@@ -114,7 +115,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
 }
 });
 } else {
-innerExecute(request, listener);
+innerExecute(task, request, listener);
 }
 }

@@ -129,8 +130,8 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
 request.setShardId(shardId);
 }

-private void innerExecute(final IndexRequest request, final ActionListener<IndexResponse> listener) {
-super.doExecute(request, listener);
+private void innerExecute(Task task, final IndexRequest request, final ActionListener<IndexResponse> listener) {
+super.doExecute(task, request, listener);
 }

 @Override
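The hunks above all follow one pattern: doExecute now receives the Task that was registered for the top-level call and threads it through both the nested createIndexAction.execute(...) call and innerExecute, so child actions stay attributed to the same task. A minimal sketch of that shape, for illustration only; FooRequest, FooResponse, setupAction and the helper methods are hypothetical placeholders, not classes in this commit:

    // Illustrative shape only; not part of the commit.
    @Override
    protected void doExecute(Task task, final FooRequest request, final ActionListener<FooResponse> listener) {
        if (needsSetupStep(request)) {
            // the child call carries the same task, so it is grouped under the parent in the task manager
            setupAction.execute(task, buildSetupRequest(request), new ActionListener<SetupResponse>() {
                @Override
                public void onResponse(SetupResponse response) {
                    innerExecute(task, request, listener);
                }

                @Override
                public void onFailure(Throwable e) {
                    listener.onFailure(e);
                }
            });
        } else {
            innerExecute(task, request, listener);
        }
    }

    private void innerExecute(Task task, FooRequest request, ActionListener<FooResponse> listener) {
        super.doExecute(task, request, listener); // hand the same task to the base implementation
    }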
@@ -146,7 +147,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
 MappingMetaData mappingMd = indexMetaData.mappingOrDefault(request.type());
 if (mappingMd != null && mappingMd.routing().required()) {
 if (request.routing() == null) {
-throw new RoutingMissingException(request.shardId().getIndex(), request.type(), request.id());
+throw new RoutingMissingException(request.shardId().getIndex().getName(), request.type(), request.id());
 }
 }

@@ -176,7 +177,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
 */
 public static Engine.Index executeIndexRequestOnReplica(IndexRequest request, IndexShard indexShard) {
 final ShardId shardId = indexShard.shardId();
-SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA, request.source()).index(shardId.getIndex()).type(request.type()).id(request.id())
+SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA, request.source()).index(shardId.getIndexName()).type(request.type()).id(request.id())
 .routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl());

 final Engine.Index operation = indexShard.prepareIndexOnReplica(sourceToParse, request.version(), request.versionType());
@@ -204,7 +205,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
 Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
 final ShardId shardId = indexShard.shardId();
 if (update != null) {
-final String indexName = shardId.getIndex();
+final String indexName = shardId.getIndexName();
 mappingUpdatedAction.updateMappingOnMasterSynchronously(indexName, request.type(), update);
 operation = prepareIndexOperationOnPrimary(request, indexShard);
 update = operation.parsedDoc().dynamicMappingsUpdate();
@@ -158,7 +158,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
 if (itemResponses.isEmpty()) {
 return bulkRequest;
 } else {
-BulkRequest modifiedBulkRequest = new BulkRequest(bulkRequest);
+BulkRequest modifiedBulkRequest = new BulkRequest();
 modifiedBulkRequest.refresh(bulkRequest.refresh());
 modifiedBulkRequest.consistencyLevel(bulkRequest.consistencyLevel());
 modifiedBulkRequest.timeout(bulkRequest.timeout());
@@ -66,7 +66,6 @@ public class PercolateRequest extends BroadcastRequest<PercolateRequest> impleme
 }

 PercolateRequest(PercolateRequest request, BytesReference docSource) {
-super(request);
 this.indices = request.indices();
 this.documentType = request.documentType();
 this.routing = request.routing();
@@ -274,7 +273,7 @@ public class PercolateRequest extends BroadcastRequest<PercolateRequest> impleme
 source = in.readBytesReference();
 docSource = in.readBytesReference();
 if (in.readBoolean()) {
-getRequest = new GetRequest(null);
+getRequest = new GetRequest();
 getRequest.readFrom(in);
 }
 onlyCount = in.readBoolean();
@@ -57,7 +57,7 @@ public class PercolateShardResponse extends BroadcastShardResponse {
 }

 public PercolateShardResponse(TopDocs topDocs, Map<Integer, String> ids, Map<Integer, Map<String, HighlightField>> hls, PercolateContext context) {
-super(new ShardId(context.shardTarget().getIndex(), context.shardTarget().getShardId()));
+super(context.indexShard().shardId());
 this.topDocs = topDocs;
 this.ids = ids;
 this.hls = hls;
@@ -97,7 +97,7 @@ public class TransportMultiPercolateAction extends HandledTransportAction<MultiP
 }

 if (!existingDocsRequests.isEmpty()) {
-final MultiGetRequest multiGetRequest = new MultiGetRequest(request);
+final MultiGetRequest multiGetRequest = new MultiGetRequest();
 for (GetRequest getRequest : existingDocsRequests) {
 multiGetRequest.add(
 new MultiGetRequest.Item(getRequest.index(), getRequest.type(), getRequest.id())
@@ -200,7 +200,7 @@ public class TransportMultiPercolateAction extends HandledTransportAction<MultiP
 ShardId shardId = shard.shardId();
 TransportShardMultiPercolateAction.Request requests = requestsByShard.get(shardId);
 if (requests == null) {
-requestsByShard.put(shardId, requests = new TransportShardMultiPercolateAction.Request(multiPercolateRequest, shardId.getIndex(), shardId.getId(), percolateRequest.preference()));
+requestsByShard.put(shardId, requests = new TransportShardMultiPercolateAction.Request(shardId.getIndexName(), shardId.getId(), percolateRequest.preference()));
 }
 logger.trace("Adding shard[{}] percolate request for item[{}]", shardId, slot);
 requests.add(new TransportShardMultiPercolateAction.Request.Item(slot, new PercolateShardRequest(shardId, percolateRequest)));
@@ -41,6 +41,7 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.engine.DocumentMissingException;
 import org.elasticsearch.percolator.PercolateException;
 import org.elasticsearch.percolator.PercolatorService;
+import org.elasticsearch.tasks.Task;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;

@@ -70,11 +71,11 @@ public class TransportPercolateAction extends TransportBroadcastAction<Percolate
 }

 @Override
-protected void doExecute(final PercolateRequest request, final ActionListener<PercolateResponse> listener) {
+protected void doExecute(Task task, final PercolateRequest request, final ActionListener<PercolateResponse> listener) {
 request.startTime = System.currentTimeMillis();
 if (request.getRequest() != null) {
 //create a new get request to make sure it has the same headers and context as the original percolate request
-GetRequest getRequest = new GetRequest(request.getRequest(), request);
+GetRequest getRequest = new GetRequest(request.getRequest());
 getAction.execute(getRequest, new ActionListener<GetResponse>() {
 @Override
 public void onResponse(GetResponse getResponse) {
@@ -84,7 +85,7 @@ public class TransportPercolateAction extends TransportBroadcastAction<Percolate
 }

 BytesReference docSource = getResponse.getSourceAsBytesRef();
-TransportPercolateAction.super.doExecute(new PercolateRequest(request, docSource), listener);
+TransportPercolateAction.super.doExecute(task, new PercolateRequest(request, docSource), listener);
 }

 @Override
@@ -93,7 +94,7 @@ public class TransportPercolateAction extends TransportBroadcastAction<Percolate
 }
 });
 } else {
-super.doExecute(request, listener);
+super.doExecute(task, request, listener);
 }
 }

@@ -150,7 +151,7 @@ public class TransportPercolateAction extends TransportBroadcastAction<Percolate
 } else {
 PercolatorService.ReduceResult result = null;
 try {
-result = percolatorService.reduce(onlyCount, shardResults, request);
+result = percolatorService.reduce(onlyCount, shardResults);
 } catch (IOException e) {
 throw new ElasticsearchException("error during reduce phase", e);
 }
@@ -117,8 +117,8 @@ public class TransportShardMultiPercolateAction extends TransportSingleShardActi
 public Request() {
 }

-Request(MultiPercolateRequest multiPercolateRequest, String concreteIndex, int shardId, String preference) {
-super(multiPercolateRequest, concreteIndex);
+Request(String concreteIndex, int shardId, String preference) {
+super(concreteIndex);
 this.shardId = shardId;
 this.preference = preference;
 this.items = new ArrayList<>();
@@ -37,17 +37,6 @@ public class ClearScrollRequest extends ActionRequest<ClearScrollRequest> {

 private List<String> scrollIds;

-public ClearScrollRequest() {
-}
-
-/**
-* Creates a clear scroll request caused by some other request, which is provided as an
-* argument so that its headers and context can be copied to the new request
-*/
-public ClearScrollRequest(ActionRequest request) {
-super(request);
-}
-
 public List<String> getScrollIds() {
 return scrollIds;
 }
@@ -80,8 +80,7 @@ public class SearchRequest extends ActionRequest<SearchRequest> implements Indic
 * Copy constructor that creates a new search request that is a copy of the one provided as an argument.
 * The new request will inherit though headers and context from the original request that caused it.
 */
-public SearchRequest(SearchRequest searchRequest, ActionRequest originalRequest) {
-super(originalRequest);
+public SearchRequest(SearchRequest searchRequest) {
 this.searchType = searchRequest.searchType;
 this.indices = searchRequest.indices;
 this.routing = searchRequest.routing;
@@ -94,15 +93,6 @@ public class SearchRequest extends ActionRequest<SearchRequest> implements Indic
 this.indicesOptions = searchRequest.indicesOptions;
 }

-/**
-* Constructs a new search request starting from the provided request, meaning that it will
-* inherit its headers and context
-*/
-public SearchRequest(ActionRequest request) {
-super(request);
-this.source = new SearchSourceBuilder();
-}
-
 /**
 * Constructs a new search request against the indices. No indices provided here means that search
 * will run against all indices.
@@ -28,7 +28,6 @@ import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.script.Script;
 import org.elasticsearch.script.Template;
 import org.elasticsearch.search.Scroll;
-import org.elasticsearch.search.searchafter.SearchAfterBuilder;
 import org.elasticsearch.search.aggregations.AggregatorFactory;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorFactory;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
@@ -46,14 +46,6 @@ public class SearchScrollRequest extends ActionRequest<SearchScrollRequest> {
 this.scrollId = scrollId;
 }

-/**
-* Creates a scroll request caused by some other request, which is provided as an
-* argument so that its headers and context can be copied to the new request
-*/
-public SearchScrollRequest(ActionRequest request) {
-super(request);
-}
-
 @Override
 public ActionRequestValidationException validate() {
 ActionRequestValidationException validationException = null;
@@ -59,7 +59,7 @@ public class TransportMultiSearchAction extends HandledTransportAction<MultiSear
 final AtomicInteger counter = new AtomicInteger(responses.length());
 for (int i = 0; i < responses.length(); i++) {
 final int index = i;
-SearchRequest searchRequest = new SearchRequest(request.requests().get(i), request);
+SearchRequest searchRequest = new SearchRequest(request.requests().get(i));
 searchAction.execute(searchRequest, new ActionListener<SearchResponse>() {
 @Override
 public void onResponse(SearchResponse searchResponse) {
@@ -135,7 +135,7 @@ public class TransportSearchDfsQueryAndFetchAction extends TransportSearchTypeAc
 public void doRun() throws IOException {
 sortedShardList = searchPhaseController.sortDocs(true, queryFetchResults);
 final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults,
-queryFetchResults, request);
+queryFetchResults);
 String scrollId = null;
 if (request.scroll() != null) {
 scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
@@ -211,7 +211,7 @@ public class TransportSearchDfsQueryThenFetchAction extends TransportSearchTypeA
 @Override
 public void doRun() throws IOException {
 final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryResults,
-fetchResults, request);
+fetchResults);
 String scrollId = null;
 if (request.scroll() != null) {
 scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
@@ -82,7 +82,7 @@ public class TransportSearchQueryAndFetchAction extends TransportSearchTypeActio
 boolean useScroll = request.scroll() != null;
 sortedShardList = searchPhaseController.sortDocs(useScroll, firstResults);
 final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, firstResults,
-firstResults, request);
+firstResults);
 String scrollId = null;
 if (request.scroll() != null) {
 scrollId = buildScrollId(request.searchType(), firstResults, null);
@@ -146,7 +146,7 @@ public class TransportSearchQueryThenFetchAction extends TransportSearchTypeActi
 @Override
 public void doRun() throws IOException {
 final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, firstResults,
-fetchResults, request);
+fetchResults);
 String scrollId = null;
 if (request.scroll() != null) {
 scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
@@ -193,7 +193,7 @@ public class TransportSearchScrollQueryAndFetchAction extends AbstractComponent
 private void innerFinishHim() throws Exception {
 ScoreDoc[] sortedShardList = searchPhaseController.sortDocs(true, queryFetchResults);
 final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults,
-queryFetchResults, request);
+queryFetchResults);
 String scrollId = null;
 if (request.scroll() != null) {
 scrollId = request.scrollId();
@@ -208,7 +208,7 @@ public class TransportSearchScrollQueryThenFetchAction extends AbstractComponent
 IntArrayList docIds = entry.value;
 final QuerySearchResult querySearchResult = queryResults.get(entry.index);
 ScoreDoc lastEmittedDoc = lastEmittedDocPerShard[entry.index];
-ShardFetchRequest shardFetchRequest = new ShardFetchRequest(request, querySearchResult.id(), docIds, lastEmittedDoc);
+ShardFetchRequest shardFetchRequest = new ShardFetchRequest(querySearchResult.id(), docIds, lastEmittedDoc);
 DiscoveryNode node = nodes.get(querySearchResult.shardTarget().nodeId());
 searchService.sendExecuteFetchScroll(node, shardFetchRequest, new ActionListener<FetchSearchResult>() {
 @Override
@@ -243,7 +243,7 @@ public class TransportSearchScrollQueryThenFetchAction extends AbstractComponent
 }

 private void innerFinishHim() {
-InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryResults, fetchResults, request);
+InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryResults, fetchResults);
 String scrollId = null;
 if (request.scroll() != null) {
 scrollId = request.scrollId();
@@ -163,7 +163,7 @@ public abstract class TransportSearchTypeAction extends TransportAction<SearchRe
 if (node == null) {
 onFirstPhaseResult(shardIndex, shard, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
 } else {
-String[] filteringAliases = indexNameExpressionResolver.filteringAliases(clusterState, shard.index(), request.indices());
+String[] filteringAliases = indexNameExpressionResolver.filteringAliases(clusterState, shard.index().getName(), request.indices());
 sendExecuteFirstPhase(node, internalSearchRequest(shard, shardsIts.size(), request, filteringAliases, startTime()), new ActionListener<FirstResult>() {
 @Override
 public void onResponse(FirstResult result) {
@@ -143,7 +143,7 @@ public class TransportSuggestAction extends TransportBroadcastAction<SuggestRequ
 throw new IllegalArgumentException("suggest content missing");
 }
 final SuggestionSearchContext context = suggestPhase.parseElement().parseInternal(parser, indexService.mapperService(),
-indexService.fieldData(), request.shardId().getIndex(), request.shardId().id(), request);
+indexService.fieldData(), request.shardId().getIndexName(), request.shardId().id());
 final Suggest result = suggestPhase.execute(context, searcher.searcher());
 return new ShardSuggestResponse(request.shardId(), result);
 }
@@ -23,111 +23,100 @@ import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.common.Booleans;
 import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.mapper.MapperService;

+import java.util.ArrayList;
+import java.util.List;

 /**
 * Encapsulates the logic of whether a new index should be automatically created when
 * a write operation is about to happen in a non existing index.
 */
 public final class AutoCreateIndex {

-private final boolean needToCheck;
-private final boolean globallyDisabled;
-private final boolean dynamicMappingDisabled;
-private final String[] matches;
-private final String[] matches2;
-private final IndexNameExpressionResolver resolver;
 public static final Setting<AutoCreate> AUTO_CREATE_INDEX_SETTING = new Setting<>("action.auto_create_index", "true", AutoCreate::new, false, Setting.Scope.CLUSTER);

+private final boolean dynamicMappingDisabled;
+private final IndexNameExpressionResolver resolver;
+private final AutoCreate autoCreate;

 @Inject
 public AutoCreateIndex(Settings settings, IndexNameExpressionResolver resolver) {
 this.resolver = resolver;
 dynamicMappingDisabled = !MapperService.INDEX_MAPPER_DYNAMIC_SETTING.get(settings);
-final AutoCreate autoCreate = AUTO_CREATE_INDEX_SETTING.get(settings);
-if (autoCreate.autoCreateIndex) {
-needToCheck = true;
-globallyDisabled = false;
-matches = autoCreate.indices;
-if (matches != null) {
-matches2 = new String[matches.length];
-for (int i = 0; i < matches.length; i++) {
-matches2[i] = matches[i].substring(1);
-}
-} else {
-matches2 = null;
-}
-} else {
-needToCheck = false;
-globallyDisabled = true;
-matches = null;
-matches2 = null;
-}
+this.autoCreate = AUTO_CREATE_INDEX_SETTING.get(settings);
 }

 /**
 * Do we really need to check if an index should be auto created?
 */
 public boolean needToCheck() {
-return this.needToCheck;
+return this.autoCreate.autoCreateIndex;
 }

 /**
 * Should the index be auto created?
 */
 public boolean shouldAutoCreate(String index, ClusterState state) {
-if (!needToCheck) {
+if (autoCreate.autoCreateIndex == false) {
 return false;
 }
-boolean exists = resolver.hasIndexOrAlias(index, state);
-if (exists) {
+if (dynamicMappingDisabled) {
 return false;
 }
-if (globallyDisabled || dynamicMappingDisabled) {
+if (resolver.hasIndexOrAlias(index, state)) {
 return false;
 }
 // matches not set, default value of "true"
-if (matches == null) {
+if (autoCreate.expressions.isEmpty()) {
 return true;
 }
-for (int i = 0; i < matches.length; i++) {
-char c = matches[i].charAt(0);
-if (c == '-') {
-if (Regex.simpleMatch(matches2[i], index)) {
-return false;
-}
-} else if (c == '+') {
-if (Regex.simpleMatch(matches2[i], index)) {
-return true;
-}
-} else {
-if (Regex.simpleMatch(matches[i], index)) {
-return true;
-}
+for (Tuple<String, Boolean> expression : autoCreate.expressions) {
+String indexExpression = expression.v1();
+boolean include = expression.v2();
+if (Regex.simpleMatch(indexExpression, index)) {
+return include;
 }
 }
 return false;
 }

-public static class AutoCreate {
+private static class AutoCreate {
 private final boolean autoCreateIndex;
-private final String[] indices;
+private final List<Tuple<String, Boolean>> expressions;

-public AutoCreate(String value) {
+private AutoCreate(String value) {
 boolean autoCreateIndex;
-String[] indices = null;
+List<Tuple<String, Boolean>> expressions = new ArrayList<>();
 try {
 autoCreateIndex = Booleans.parseBooleanExact(value);
 } catch (IllegalArgumentException ex) {
 try {
-indices = Strings.commaDelimitedListToStringArray(value);
-for (String string : indices) {
-if (string == null || string.length() == 0) {
-throw new IllegalArgumentException("Can't parse [" + value + "] for setting [action.auto_create_index] must be either [true, false, or a comma seperated list of index patterns]");
+String[] patterns = Strings.commaDelimitedListToStringArray(value);
+for (String pattern : patterns) {
+if (pattern == null || pattern.length() == 0) {
+throw new IllegalArgumentException("Can't parse [" + value + "] for setting [action.auto_create_index] must be either [true, false, or a comma separated list of index patterns]");
 }
+Tuple<String, Boolean> expression;
+if (pattern.startsWith("-")) {
+if (pattern.length() == 1) {
+throw new IllegalArgumentException("Can't parse [" + value + "] for setting [action.auto_create_index] must contain an index name after [-]");
+}
+expression = new Tuple<>(pattern.substring(1), false);
+} else if(pattern.startsWith("+")) {
+if (pattern.length() == 1) {
+throw new IllegalArgumentException("Can't parse [" + value + "] for setting [action.auto_create_index] must contain an index name after [+]");
+}
+expression = new Tuple<>(pattern.substring(1), true);
+} else {
+expression = new Tuple<>(pattern, true);
+}
+expressions.add(expression);
 }
 autoCreateIndex = true;
 } catch (IllegalArgumentException ex1) {
@@ -135,7 +124,7 @@ public final class AutoCreateIndex {
 throw ex1;
 }
 }
-this.indices = indices;
+this.expressions = expressions;
 this.autoCreateIndex = autoCreateIndex;
 }
 }
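The rewritten AutoCreateIndex above replaces the parallel matches/matches2 arrays with a list of (pattern, include) tuples: each comma-separated entry of action.auto_create_index is parsed once, a leading '-' means exclude, a leading '+' or no prefix means include, and the first wildcard pattern that matches the index name decides the outcome. The following self-contained sketch restates just that matching rule outside Elasticsearch; AutoCreatePatternSketch, Expression and wildcardMatch are illustrative stand-ins for the real AutoCreate, Tuple and Regex.simpleMatch classes:

    import java.util.ArrayList;
    import java.util.List;

    // Standalone restatement of the new first-match-wins semantics; not the Elasticsearch classes themselves.
    public class AutoCreatePatternSketch {

        // (pattern, include) pair, mirroring Tuple<String, Boolean> in the commit
        static final class Expression {
            final String pattern;
            final boolean include;

            Expression(String pattern, boolean include) {
                this.pattern = pattern;
                this.include = include;
            }
        }

        static List<Expression> parse(String value) {
            List<Expression> expressions = new ArrayList<>();
            for (String pattern : value.split(",")) {
                if (pattern.startsWith("-")) {
                    expressions.add(new Expression(pattern.substring(1), false));
                } else if (pattern.startsWith("+")) {
                    expressions.add(new Expression(pattern.substring(1), true));
                } else {
                    expressions.add(new Expression(pattern, true));
                }
            }
            return expressions;
        }

        // the first matching pattern decides; if nothing matches, the index is not auto created
        static boolean shouldAutoCreate(String index, List<Expression> expressions) {
            for (Expression expression : expressions) {
                if (wildcardMatch(expression.pattern, index)) {
                    return expression.include;
                }
            }
            return false;
        }

        // minimal '*' wildcard match, standing in for Regex.simpleMatch
        static boolean wildcardMatch(String pattern, String value) {
            return value.matches(pattern.replace("*", ".*"));
        }

        public static void main(String[] args) {
            List<Expression> expressions = parse("+logs-*,-tmp-*,metrics-*");
            System.out.println(shouldAutoCreate("logs-2016.01", expressions));  // true
            System.out.println(shouldAutoCreate("tmp-scratch", expressions));   // false
            System.out.println(shouldAutoCreate("other", expressions));         // false
        }
    }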
@@ -0,0 +1,66 @@
+/*
+* Licensed to Elasticsearch under one or more contributor
+* license agreements. See the NOTICE file distributed with
+* this work for additional information regarding copyright
+* ownership. Elasticsearch licenses this file to you under
+* the Apache License, Version 2.0 (the "License"); you may
+* not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied. See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*/
+
+package org.elasticsearch.action.support;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.tasks.Task;
+
+import java.io.IOException;
+
+/**
+* Base class for action requests that can have associated child tasks
+*/
+public abstract class ChildTaskActionRequest<Request extends ActionRequest<Request>> extends ActionRequest<Request> {
+
+private String parentTaskNode;
+
+private long parentTaskId;
+
+protected ChildTaskActionRequest() {
+
+}
+
+public void setParentTask(String parentTaskNode, long parentTaskId) {
+this.parentTaskNode = parentTaskNode;
+this.parentTaskId = parentTaskId;
+}
+
+@Override
+public void readFrom(StreamInput in) throws IOException {
+super.readFrom(in);
+parentTaskNode = in.readOptionalString();
+parentTaskId = in.readLong();
+}
+
+@Override
+public void writeTo(StreamOutput out) throws IOException {
+super.writeTo(out);
+out.writeOptionalString(parentTaskNode);
+out.writeLong(parentTaskId);
+}
+
+@Override
+public Task createTask(long id, String type, String action) {
+return new Task(id, type, action, this::getDescription, parentTaskNode, parentTaskId);
+}
+
+}
@ -19,10 +19,8 @@
|
|||||||
|
|
||||||
package org.elasticsearch.action.support;
|
package org.elasticsearch.action.support;
|
||||||
|
|
||||||
import org.elasticsearch.Version;
|
|
||||||
import org.elasticsearch.common.io.stream.StreamInput;
|
import org.elasticsearch.common.io.stream.StreamInput;
|
||||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||||
import org.elasticsearch.tasks.ChildTask;
|
|
||||||
import org.elasticsearch.tasks.Task;
|
import org.elasticsearch.tasks.Task;
|
||||||
import org.elasticsearch.transport.TransportRequest;
|
import org.elasticsearch.transport.TransportRequest;
|
||||||
|
|
||||||
@ -38,11 +36,6 @@ public class ChildTaskRequest extends TransportRequest {
|
|||||||
private long parentTaskId;
|
private long parentTaskId;
|
||||||
|
|
||||||
protected ChildTaskRequest() {
|
protected ChildTaskRequest() {
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
protected ChildTaskRequest(TransportRequest parentTaskRequest) {
|
|
||||||
super(parentTaskRequest);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
public void setParentTask(String parentTaskNode, long parentTaskId) {
|
public void setParentTask(String parentTaskNode, long parentTaskId) {
|
||||||
@ -66,6 +59,6 @@ public class ChildTaskRequest extends TransportRequest {
|
|||||||
|
|
||||||
@Override
|
@Override
|
||||||
public Task createTask(long id, String type, String action) {
|
public Task createTask(long id, String type, String action) {
|
||||||
return new ChildTask(id, type, action, this::getDescription, parentTaskNode, parentTaskId);
|
return new Task(id, type, action, this::getDescription, parentTaskNode, parentTaskId);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -48,7 +48,7 @@ public class DefaultShardOperationFailedException implements ShardOperationFaile
|
|||||||
}
|
}
|
||||||
|
|
||||||
public DefaultShardOperationFailedException(ElasticsearchException e) {
|
public DefaultShardOperationFailedException(ElasticsearchException e) {
|
||||||
this.index = e.getIndex();
|
this.index = e.getIndex() == null ? null : e.getIndex().getName();
|
||||||
this.shardId = e.getShardId().id();
|
this.shardId = e.getShardId().id();
|
||||||
this.reason = e;
|
this.reason = e;
|
||||||
this.status = e.status();
|
this.status = e.status();
|
||||||
|
@@ -44,13 +44,14 @@ public abstract class HandledTransportAction<Request extends ActionRequest<Reque
     class TransportHandler implements TransportRequestHandler<Request> {
 
         @Override
-        public final void messageReceived(final Request request, final TransportChannel channel, Task task) throws Exception {
-            messageReceived(request, channel);
+        public final void messageReceived(Request request, TransportChannel channel) throws Exception {
+            throw new UnsupportedOperationException("the task parameter is required for this operation");
         }
 
         @Override
-        public final void messageReceived(Request request, TransportChannel channel) throws Exception {
-            execute(request, new ActionListener<Response>() {
+        public final void messageReceived(final Request request, final TransportChannel channel, Task task) throws Exception {
+            // We already got the task created on the netty layer - no need to create it again on the transport layer
+            execute(task, request, new ActionListener<Response>() {
                 @Override
                 public void onResponse(Response response) {
                     try {
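The handler hunk above follows one rule: the task-less messageReceived overload is now a hard failure, and only the task-aware overload does any work, delegating to execute with the task that the lower networking layer already created. A minimal, self-contained sketch of that shape (plain Java; the interface and types here are simplified stand-ins, not the real Elasticsearch classes):

    // Simplified stand-ins; the real Elasticsearch types carry far more state.
    interface SketchTransportHandler<R> {
        void messageReceived(R request, String channel) throws Exception;               // legacy entry point
        void messageReceived(R request, String channel, long taskId) throws Exception;  // task-aware entry point
    }

    class EchoHandler implements SketchTransportHandler<String> {
        @Override
        public void messageReceived(String request, String channel) throws Exception {
            // Mirrors the diff: without a task there is nothing sensible to do.
            throw new UnsupportedOperationException("the task parameter is required for this operation");
        }

        @Override
        public void messageReceived(String request, String channel, long taskId) throws Exception {
            // The task already exists, so just run the work under it.
            System.out.println("task " + taskId + " handling [" + request + "] on channel " + channel);
        }

        public static void main(String[] args) throws Exception {
            new EchoHandler().messageReceived("ping", "channel-1", 42L);
        }
    }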
@@ -50,7 +50,7 @@ public final class ThreadedActionListener<Response> implements ActionListener<Re
         this.threadPool = threadPool;
         // Should the action listener be threaded or not by default. Action listeners are automatically threaded for client
         // nodes and transport client in order to make sure client side code is not executed on IO threads.
-        this.threadedListener = DiscoveryNode.clientNode(settings) || TransportClient.CLIENT_TYPE.equals(settings.get(Client.CLIENT_TYPE_SETTING));
+        this.threadedListener = DiscoveryNode.clientNode(settings) || TransportClient.CLIENT_TYPE.equals(Client.CLIENT_TYPE_SETTING_S.get(settings));
     }
 
     public <Response> ActionListener<Response> wrap(ActionListener<Response> listener) {
@@ -66,6 +66,11 @@ public abstract class TransportAction<Request extends ActionRequest<Request>, Re
         return future;
     }
 
+    /**
+     * Use this method when the transport action call should result in creation of a new task associated with the call.
+     *
+     * This is a typical behavior.
+     */
     public final Task execute(Request request, ActionListener<Response> listener) {
         Task task = taskManager.register("transport", actionName, request);
         if (task == null) {
@@ -88,7 +93,10 @@ public abstract class TransportAction<Request extends ActionRequest<Request>, Re
         return task;
     }
 
-    private final void execute(Task task, Request request, ActionListener<Response> listener) {
+    /**
+     * Use this method when the transport action should continue to run in the context of the current task
+     */
+    public final void execute(Task task, Request request, ActionListener<Response> listener) {
 
         ActionRequestValidationException validationException = request.validate();
         if (validationException != null) {
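The two execute entry points added above split responsibilities: the request-only overload registers a fresh task with the task manager and then delegates, while the task-accepting overload runs validation and the action under a task created elsewhere. A rough sketch of that split, with TaskManager and Task reduced to hypothetical minimal classes purely for illustration:

    import java.util.concurrent.atomic.AtomicLong;

    // Hypothetical, heavily simplified versions of the classes touched in the diff.
    class SketchTask {
        final long id;
        SketchTask(long id) { this.id = id; }
    }

    class SketchTaskManager {
        private final AtomicLong ids = new AtomicLong();
        SketchTask register(String type, String action) { return new SketchTask(ids.incrementAndGet()); }
        void unregister(SketchTask task) { /* bookkeeping elided */ }
    }

    class SketchTransportAction {
        private final SketchTaskManager taskManager = new SketchTaskManager();

        // Typical path: create a new task for this call, then continue under it.
        public final SketchTask execute(String request, Runnable listener) {
            SketchTask task = taskManager.register("transport", "sketch/action");
            execute(task, request, listener);
            return task;
        }

        // Continuation path: run in the context of an already-created task.
        public final void execute(SketchTask task, String request, Runnable listener) {
            // validation and the filter chain would run here
            System.out.println("running [" + request + "] as task " + task.id);
            listener.run();
            taskManager.unregister(task);
        }

        public static void main(String[] args) {
            new SketchTransportAction().execute("demo", () -> System.out.println("done"));
        }
    }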
@@ -37,11 +37,6 @@ public class BroadcastRequest<Request extends BroadcastRequest<Request>> extends
     private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpenAndForbidClosed();
 
     public BroadcastRequest() {
 
     }
-
-    protected BroadcastRequest(ActionRequest<?> originalRequest) {
-        super(originalRequest);
-    }
-
     protected BroadcastRequest(String[] indices) {
@@ -21,18 +21,18 @@ package org.elasticsearch.action.support.broadcast;
 
 import org.elasticsearch.action.IndicesRequest;
 import org.elasticsearch.action.OriginalIndices;
+import org.elasticsearch.action.support.ChildTaskRequest;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.transport.TransportRequest;
 
 import java.io.IOException;
 
 /**
  *
  */
-public abstract class BroadcastShardRequest extends TransportRequest implements IndicesRequest {
+public abstract class BroadcastShardRequest extends ChildTaskRequest implements IndicesRequest {
 
     private ShardId shardId;
 
@@ -42,7 +42,6 @@ public abstract class BroadcastShardRequest extends TransportRequest implements
     }
 
     protected BroadcastShardRequest(ShardId shardId, BroadcastRequest request) {
-        super(request);
         this.shardId = shardId;
        this.originalIndices = new OriginalIndices(request);
     }
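BroadcastShardRequest now extends ChildTaskRequest instead of TransportRequest, so every shard-level request can carry the coordinates of the task that spawned it. The essential shape of such a base class is small; a hypothetical, dependency-free version just to show the contract implied by the diff:

    // Hypothetical stand-in for a request that can record its parent task.
    class ChildRequestSketch {
        private String parentTaskNode;
        private long parentTaskId = -1L; // -1 means "no parent"

        public void setParentTask(String parentTaskNode, long parentTaskId) {
            this.parentTaskNode = parentTaskNode;
            this.parentTaskId = parentTaskId;
        }

        public String getParentTaskNode() { return parentTaskNode; }
        public long getParentTaskId() { return parentTaskId; }

        public static void main(String[] args) {
            ChildRequestSketch shardRequest = new ChildRequestSketch();
            shardRequest.setParentTask("node-1", 7L);
            System.out.println(shardRequest.getParentTaskNode() + "/" + shardRequest.getParentTaskId());
        }
    }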
@@ -42,7 +42,7 @@ public abstract class BroadcastShardResponse extends TransportResponse {
     }
 
     public String getIndex() {
-        return this.shardId.getIndex();
+        return this.shardId.getIndexName();
     }
 
     public int getShardId() {
@@ -35,6 +35,7 @@ import org.elasticsearch.cluster.routing.ShardIterator;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.tasks.Task;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.BaseTransportResponseHandler;
 import org.elasticsearch.transport.TransportChannel;
@@ -69,8 +70,13 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest<
     }
 
     @Override
-    protected void doExecute(Request request, ActionListener<Response> listener) {
-        new AsyncBroadcastAction(request, listener).start();
+    protected void doExecute(Task task, Request request, ActionListener<Response> listener) {
+        new AsyncBroadcastAction(task, request, listener).start();
+    }
+
+    @Override
+    protected final void doExecute(Request request, ActionListener<Response> listener) {
+        throw new UnsupportedOperationException("the task parameter is required for this operation");
     }
 
     protected abstract Response newResponse(Request request, AtomicReferenceArray shardsResponses, ClusterState clusterState);
@@ -93,6 +99,7 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest<
 
     protected class AsyncBroadcastAction {
 
+        private final Task task;
         private final Request request;
         private final ActionListener<Response> listener;
         private final ClusterState clusterState;
@@ -102,7 +109,8 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest<
         private final AtomicInteger counterOps = new AtomicInteger();
         private final AtomicReferenceArray shardsResponses;
 
-        protected AsyncBroadcastAction(Request request, ActionListener<Response> listener) {
+        protected AsyncBroadcastAction(Task task, Request request, ActionListener<Response> listener) {
+            this.task = task;
             this.request = request;
             this.listener = listener;
 
@@ -158,6 +166,7 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest<
                 } else {
                     try {
                         final ShardRequest shardRequest = newShardRequest(shardIt.size(), shard, request);
+                        shardRequest.setParentTask(clusterService.localNode().getId(), task.getId());
                         DiscoveryNode node = nodes.get(shard.currentNodeId());
                         if (node == null) {
                             // no node connected, act as failure
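The one-line addition above is the heart of task-hierarchy tracking in broadcast actions: before a per-shard request leaves the coordinating node it is stamped with the local node id and the id of the task driving the broadcast. A condensed illustration of the same idea (every type here is a simplified placeholder, not the real transport plumbing):

    import java.util.List;

    class BroadcastSketch {
        static final class ParentTask { final long id; ParentTask(long id) { this.id = id; } }
        static final class ShardRequest {
            String parentNode; long parentTaskId;
            void setParentTask(String node, long taskId) { parentNode = node; parentTaskId = taskId; }
        }

        static void broadcast(ParentTask task, String localNodeId, List<String> shards) {
            for (String shard : shards) {
                ShardRequest shardRequest = new ShardRequest();
                // Same stamp as in the diff: local node id plus the id of the coordinating task.
                shardRequest.setParentTask(localNodeId, task.id);
                System.out.println("sending to " + shard + " as child of "
                        + shardRequest.parentNode + ":" + shardRequest.parentTaskId);
            }
        }

        public static void main(String[] args) {
            broadcast(new ParentTask(42L), "node-1", List.of("index[0]", "index[1]"));
        }
    }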
@@ -25,6 +25,7 @@ import org.elasticsearch.action.IndicesRequest;
 import org.elasticsearch.action.NoShardAvailableActionException;
 import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.ChildTaskRequest;
 import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.HandledTransportAction;
 import org.elasticsearch.action.support.IndicesOptions;
@@ -44,6 +45,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.tasks.Task;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.BaseTransportResponseHandler;
 import org.elasticsearch.transport.NodeShouldNotConnectException;
@@ -118,7 +120,7 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
                 FailedNodeException exception = (FailedNodeException) responses.get(i);
                 totalShards += nodes.get(exception.nodeId()).size();
                 for (ShardRouting shard : nodes.get(exception.nodeId())) {
-                    exceptions.add(new DefaultShardOperationFailedException(shard.getIndex(), shard.getId(), exception));
+                    exceptions.add(new DefaultShardOperationFailedException(shard.getIndexName(), shard.getId(), exception));
                 }
             } else {
                 NodeResponse response = (NodeResponse) responses.get(i);
@@ -127,7 +129,7 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
                 successfulShards += response.getSuccessfulShards();
                 for (BroadcastShardOperationFailedException throwable : response.getExceptions()) {
                     if (!TransportActions.isShardNotAvailableException(throwable)) {
-                        exceptions.add(new DefaultShardOperationFailedException(throwable.getIndex(), throwable.getShardId().getId(), throwable));
+                        exceptions.add(new DefaultShardOperationFailedException(throwable.getShardId().getIndexName(), throwable.getShardId().getId(), throwable));
                     }
                 }
             }
@@ -206,11 +208,17 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
     protected abstract ClusterBlockException checkRequestBlock(ClusterState state, Request request, String[] concreteIndices);
 
     @Override
-    protected void doExecute(Request request, ActionListener<Response> listener) {
-        new AsyncAction(request, listener).start();
+    protected final void doExecute(Request request, ActionListener<Response> listener) {
+        throw new UnsupportedOperationException("the task parameter is required for this operation");
+    }
+
+    @Override
+    protected void doExecute(Task task, Request request, ActionListener<Response> listener) {
+        new AsyncAction(task, request, listener).start();
     }
 
     protected class AsyncAction {
+        private final Task task;
         private final Request request;
         private final ActionListener<Response> listener;
         private final ClusterState clusterState;
@@ -220,7 +228,8 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
         private final AtomicInteger counter = new AtomicInteger();
         private List<NoShardAvailableActionException> unavailableShardExceptions = new ArrayList<>();
 
-        protected AsyncAction(Request request, ActionListener<Response> listener) {
+        protected AsyncAction(Task task, Request request, ActionListener<Response> listener) {
+            this.task = task;
             this.request = request;
             this.listener = listener;
 
@@ -290,6 +299,9 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
         private void sendNodeRequest(final DiscoveryNode node, List<ShardRouting> shards, final int nodeIndex) {
             try {
                 NodeRequest nodeRequest = new NodeRequest(node.getId(), request, shards);
+                if (task != null) {
+                    nodeRequest.setParentTask(clusterService.localNode().id(), task.getId());
+                }
                 transportService.sendRequest(node, transportNodeBroadcastAction, nodeRequest, new BaseTransportResponseHandler<NodeResponse>() {
                     @Override
                     public NodeResponse newInstance() {
@@ -406,7 +418,7 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
                 }
             } catch (Throwable t) {
                 BroadcastShardOperationFailedException e = new BroadcastShardOperationFailedException(shardRouting.shardId(), "operation " + actionName + " failed", t);
-                e.setIndex(shardRouting.getIndex());
+                e.setIndex(shardRouting.getIndexName());
                 e.setShard(shardRouting.shardId());
                 shardResults[shardIndex] = e;
                 if (TransportActions.isShardNotAvailableException(t)) {
@@ -422,7 +434,7 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
             }
         }
 
-    public class NodeRequest extends TransportRequest implements IndicesRequest {
+    public class NodeRequest extends ChildTaskRequest implements IndicesRequest {
         private String nodeId;
 
         private List<ShardRouting> shards;
@@ -433,7 +445,6 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
         }
 
         public NodeRequest(String nodeId, Request request, List<ShardRouting> shards) {
-            super(request);
             this.indicesLevelRequest = request;
             this.shards = shards;
             this.nodeId = nodeId;
@@ -42,10 +42,6 @@ public abstract class AcknowledgedRequest<Request extends MasterNodeRequest<Requ
     protected AcknowledgedRequest() {
     }
 
-    protected AcknowledgedRequest(ActionRequest<?> request) {
-        super(request);
-    }
-
     /**
      * Allows to set the timeout
      * @param timeout timeout as a string (e.g. 1s)
@@ -20,6 +20,7 @@
 package org.elasticsearch.action.support.master;
 
 import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.support.ChildTaskActionRequest;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.unit.TimeValue;
@@ -29,18 +30,13 @@ import java.io.IOException;
 /**
  * A based request for master based operation.
  */
-public abstract class MasterNodeRequest<Request extends MasterNodeRequest<Request>> extends ActionRequest<Request> {
+public abstract class MasterNodeRequest<Request extends MasterNodeRequest<Request>> extends ChildTaskActionRequest<Request> {
 
     public static final TimeValue DEFAULT_MASTER_NODE_TIMEOUT = TimeValue.timeValueSeconds(30);
 
     protected TimeValue masterNodeTimeout = DEFAULT_MASTER_NODE_TIMEOUT;
 
     protected MasterNodeRequest() {
 
-    }
-
-    protected MasterNodeRequest(ActionRequest<?> request) {
-        super(request);
     }
 
     /**
@@ -113,6 +113,9 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
         AsyncSingleAction(Task task, Request request, ActionListener<Response> listener) {
             this.task = task;
             this.request = request;
+            if (task != null) {
+                request.setParentTask(clusterService.localNode().getId(), task.getId());
+            }
             // TODO do we really need to wrap it in a listener? the handlers should be cheap
             if ((listener instanceof ThreadedActionListener) == false) {
                 listener = new ThreadedActionListener<>(logger, threadPool, ThreadPool.Names.LISTENER, listener);
@@ -121,7 +124,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
         }
 
         public void start() {
-            this.observer = new ClusterStateObserver(clusterService, request.masterNodeTimeout(), logger);
+            this.observer = new ClusterStateObserver(clusterService, request.masterNodeTimeout(), logger, threadPool.getThreadContext());
             doStart();
         }
 
@@ -36,8 +36,7 @@ public abstract class BaseNodeRequest extends ChildTaskRequest {
 
     }
 
-    protected BaseNodeRequest(BaseNodesRequest request, String nodeId) {
-        super(request);
+    protected BaseNodeRequest(String nodeId) {
         this.nodeId = nodeId;
     }
 
@@ -43,11 +43,6 @@ public abstract class BaseNodesRequest<Request extends BaseNodesRequest<Request>
 
     }
 
-    protected BaseNodesRequest(ActionRequest<?> request, String... nodesIds) {
-        super(request);
-        this.nodesIds = nodesIds;
-    }
-
     protected BaseNodesRequest(String... nodesIds) {
         this.nodesIds = nodesIds;
     }
@@ -30,22 +30,13 @@ import org.elasticsearch.index.shard.ShardId;
  */
 public class BasicReplicationRequest extends ReplicationRequest<BasicReplicationRequest> {
     public BasicReplicationRequest() {
 
     }
 
-    /**
-     * Creates a new request that inherits headers and context from the request
-     * provided as argument.
-     */
-    public BasicReplicationRequest(ActionRequest<?> request) {
-        super(request);
-    }
-
     /**
      * Creates a new request with resolved shard id
      */
-    public BasicReplicationRequest(ActionRequest<?> request, ShardId shardId) {
-        super(request, shardId);
+    public BasicReplicationRequest(ShardId shardId) {
+        super(shardId);
     }
 
     /**
@@ -23,6 +23,7 @@ import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.IndicesRequest;
 import org.elasticsearch.action.WriteConsistencyLevel;
+import org.elasticsearch.action.support.ChildTaskActionRequest;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -38,7 +39,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
 /**
  *
 */
-public abstract class ReplicationRequest<Request extends ReplicationRequest<Request>> extends ActionRequest<Request> implements IndicesRequest {
+public abstract class ReplicationRequest<Request extends ReplicationRequest<Request>> extends ChildTaskActionRequest<Request> implements IndicesRequest {
 
     public static final TimeValue DEFAULT_TIMEOUT = new TimeValue(1, TimeUnit.MINUTES);
 
@@ -58,35 +59,20 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
 
     }
 
-    /**
-     * Creates a new request that inherits headers and context from the request provided as argument.
-     */
-    public ReplicationRequest(ActionRequest<?> request) {
-        super(request);
-    }
-
     /**
      * Creates a new request with resolved shard id
     */
-    public ReplicationRequest(ActionRequest<?> request, ShardId shardId) {
-        super(request);
-        this.index = shardId.getIndex();
+    public ReplicationRequest(ShardId shardId) {
+        this.index = shardId.getIndexName();
         this.shardId = shardId;
     }
 
-    /**
-     * Copy constructor that creates a new request that is a copy of the one provided as an argument.
-     */
-    protected ReplicationRequest(Request request) {
-        this(request, request);
-    }
-
     /**
      * Copy constructor that creates a new request that is a copy of the one provided as an argument.
      * The new request will inherit though headers and context from the original request that caused it.
     */
-    protected ReplicationRequest(Request request, ActionRequest<?> originalRequest) {
-        super(originalRequest);
+    protected ReplicationRequest(Request request) {
         this.timeout = request.timeout();
         this.index = request.index();
         this.consistencyLevel = request.consistencyLevel();
@@ -40,6 +40,7 @@ import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.CountDown;
 import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.tasks.Task;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 
@@ -67,8 +68,14 @@ public abstract class TransportBroadcastReplicationAction<Request extends Broadc
         this.clusterService = clusterService;
     }
 
 
     @Override
-    protected void doExecute(final Request request, final ActionListener<Response> listener) {
+    protected final void doExecute(final Request request, final ActionListener<Response> listener) {
+        throw new UnsupportedOperationException("the task parameter is required for this operation");
+    }
+
+    @Override
+    protected void doExecute(Task task, Request request, ActionListener<Response> listener) {
         final ClusterState clusterState = clusterService.state();
         List<ShardId> shards = shards(request, clusterState);
         final CopyOnWriteArrayList<ShardResponse> shardsResponses = new CopyOnWriteArrayList();
@@ -90,13 +97,13 @@ public abstract class TransportBroadcastReplicationAction<Request extends Broadc
             @Override
             public void onFailure(Throwable e) {
                 logger.trace("{}: got failure from {}", actionName, shardId);
-                int totalNumCopies = clusterState.getMetaData().index(shardId.index().getName()).getNumberOfReplicas() + 1;
+                int totalNumCopies = clusterState.getMetaData().index(shardId.getIndexName()).getNumberOfReplicas() + 1;
                 ShardResponse shardResponse = newShardResponse();
                 ReplicationResponse.ShardInfo.Failure[] failures;
                 if (TransportActions.isShardNotAvailableException(e)) {
                     failures = new ReplicationResponse.ShardInfo.Failure[0];
                 } else {
-                    ReplicationResponse.ShardInfo.Failure failure = new ReplicationResponse.ShardInfo.Failure(shardId.index().name(), shardId.id(), null, e, ExceptionsHelper.status(e), true);
+                    ReplicationResponse.ShardInfo.Failure failure = new ReplicationResponse.ShardInfo.Failure(shardId, null, e, ExceptionsHelper.status(e), true);
                     failures = new ReplicationResponse.ShardInfo.Failure[totalNumCopies];
                     Arrays.fill(failures, failure);
                 }
@@ -107,12 +114,14 @@ public abstract class TransportBroadcastReplicationAction<Request extends Broadc
                     }
                 }
             };
-            shardExecute(request, shardId, shardActionListener);
+            shardExecute(task, request, shardId, shardActionListener);
         }
     }
 
-    protected void shardExecute(Request request, ShardId shardId, ActionListener<ShardResponse> shardActionListener) {
-        replicatedBroadcastShardAction.execute(newShardRequest(request, shardId), shardActionListener);
+    protected void shardExecute(Task task, Request request, ShardId shardId, ActionListener<ShardResponse> shardActionListener) {
+        ShardRequest shardRequest = newShardRequest(request, shardId);
+        shardRequest.setParentTask(clusterService.localNode().getId(), task.getId());
+        replicatedBroadcastShardAction.execute(shardRequest, shardActionListener);
     }
 
    /**
@@ -154,7 +163,7 @@ public abstract class TransportBroadcastReplicationAction<Request extends Broadc
                 shardFailures = new ArrayList<>();
             }
             for (ReplicationResponse.ShardInfo.Failure failure : shardResponse.getShardInfo().getFailures()) {
-                shardFailures.add(new DefaultShardOperationFailedException(new BroadcastShardOperationFailedException(new ShardId(failure.index(), failure.shardId()), failure.getCause())));
+                shardFailures.add(new DefaultShardOperationFailedException(new BroadcastShardOperationFailedException(failure.fullShardId(), failure.getCause())));
             }
         }
     }
@@ -52,6 +52,7 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.concurrent.AbstractRunnable;
 import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.engine.VersionConflictEngineException;
 import org.elasticsearch.index.shard.IndexShard;
@@ -60,6 +61,7 @@ import org.elasticsearch.index.translog.Translog;
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.node.NodeClosedException;
 import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.tasks.Task;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.BaseTransportResponseHandler;
 import org.elasticsearch.transport.ConnectTransportException;
@@ -133,8 +135,13 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
     }
 
     @Override
-    protected void doExecute(Request request, ActionListener<Response> listener) {
-        new ReroutePhase(request, listener).run();
+    protected final void doExecute(Request request, ActionListener<Response> listener) {
+        throw new UnsupportedOperationException("the task parameter is required for this operation");
+    }
+
+    @Override
+    protected void doExecute(Task task, Request request, ActionListener<Response> listener) {
+        new ReroutePhase(task, request, listener).run();
     }
 
     protected abstract Response newResponseInstance();
@@ -243,8 +250,8 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
 
     class OperationTransportHandler implements TransportRequestHandler<Request> {
         @Override
-        public void messageReceived(final Request request, final TransportChannel channel) throws Exception {
-            execute(request, new ActionListener<Response>() {
+        public void messageReceived(final Request request, final TransportChannel channel, Task task) throws Exception {
+            execute(task, request, new ActionListener<Response>() {
                 @Override
                 public void onResponse(Response result) {
                     try {
@@ -264,6 +271,11 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
                 }
             });
         }
+
+        @Override
+        public void messageReceived(Request request, TransportChannel channel) throws Exception {
+            throw new UnsupportedOperationException("the task parameter is required for this operation");
+        }
     }
 
     class PrimaryOperationTransportHandler implements TransportRequestHandler<Request> {
@@ -297,7 +309,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
         private final TransportChannel channel;
         // important: we pass null as a timeout as failing a replica is
         // something we want to avoid at all costs
-        private final ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger);
+        private final ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext());
 
         AsyncReplicaAction(ReplicaRequest request, TransportChannel channel) {
             this.request = request;
@@ -308,9 +320,12 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
         public void onFailure(Throwable t) {
             if (t instanceof RetryOnReplicaException) {
                 logger.trace("Retrying operation on replica, action [{}], request [{}]", t, transportReplicaAction, request);
+                final ThreadContext threadContext = threadPool.getThreadContext();
+                final ThreadContext.StoredContext context = threadPool.getThreadContext().newStoredContext();
                 observer.waitForNextChange(new ClusterStateObserver.Listener() {
                     @Override
                     public void onNewClusterState(ClusterState state) {
+                        context.close();
                         // Forking a thread on local node via transport service so that custom transport service have an
                         // opportunity to execute custom logic before the replica operation begins
                         String extraMessage = "action [" + transportReplicaAction + "], request[" + request + "]";
@@ -339,7 +354,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
             }
         }
         private void failReplicaIfNeeded(Throwable t) {
-            String index = request.shardId().getIndex();
+            String index = request.shardId().getIndex().getName();
             int shardId = request.shardId().id();
             logger.trace("failure on replica [{}][{}], action [{}], request [{}]", t, index, shardId, actionName, request);
             if (ignoreReplicaException(t) == false) {
@@ -403,10 +418,13 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
         private final ClusterStateObserver observer;
         private final AtomicBoolean finished = new AtomicBoolean();
 
-        ReroutePhase(Request request, ActionListener<Response> listener) {
+        ReroutePhase(Task task, Request request, ActionListener<Response> listener) {
            this.request = request;
+            if (task != null) {
+                this.request.setParentTask(clusterService.localNode().getId(), task.getId());
+            }
            this.listener = listener;
-            this.observer = new ClusterStateObserver(clusterService, request.timeout(), logger);
+            this.observer = new ClusterStateObserver(clusterService, request.timeout(), logger, threadPool.getThreadContext());
         }
 
         @Override
@@ -432,7 +450,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
             resolveRequest(state.metaData(), concreteIndex, request);
             assert request.shardId() != null : "request shardId must be set in resolveRequest";
 
-            IndexShardRoutingTable indexShard = state.getRoutingTable().shardRoutingTable(request.shardId().getIndex(), request.shardId().id());
+            IndexShardRoutingTable indexShard = state.getRoutingTable().shardRoutingTable(request.shardId());
             final ShardRouting primary = indexShard.primaryShard();
             if (primary == null || primary.active() == false) {
                 logger.trace("primary shard [{}] is not yet active, scheduling a retry: action [{}], request [{}], cluster state version [{}]", request.shardId(), actionName, request, state.version());
@@ -510,9 +528,12 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
                 finishAsFailed(failure);
                 return;
             }
+            final ThreadContext threadContext = threadPool.getThreadContext();
+            final ThreadContext.StoredContext context = threadPool.getThreadContext().newStoredContext();
            observer.waitForNextChange(new ClusterStateObserver.Listener() {
                 @Override
                 public void onNewClusterState(ClusterState state) {
+                    context.close();
                     run();
                 }
 
@@ -523,6 +544,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
 
                 @Override
                 public void onTimeout(TimeValue timeout) {
+                    context.close();
                     // Try one more time...
                     run();
                 }
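Several hunks above capture a stored thread context before waiting on a cluster-state change and close (restore) it at the start of the listener callbacks, so that whatever request-scoped state was active when the retry was scheduled is active again when it actually runs. A minimal illustration of that capture-and-restore pattern, using a ThreadLocal as a stand-in for the thread context (everything here is a simplified assumption, not the real API):

    import java.util.concurrent.CompletableFuture;

    class ContextRestoreSketch {
        // Stand-in for a thread context: a single request-scoped value.
        static final ThreadLocal<String> HEADER = ThreadLocal.withInitial(() -> "none");

        // Capture the current value so a later callback can restore it, analogous to taking a stored context.
        static Runnable storedContext() {
            String captured = HEADER.get();
            return () -> HEADER.set(captured);
        }

        public static void main(String[] args) {
            HEADER.set("request-123");
            Runnable restore = storedContext();          // capture before the asynchronous wait
            CompletableFuture.runAsync(() -> {
                restore.run();                           // restore inside the callback, as the diff does before retrying
                System.out.println("callback sees: " + HEADER.get());
            }).join();
        }
    }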
@@ -637,7 +659,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
         }
         final int sizeActive;
         final int requiredNumber;
-        IndexRoutingTable indexRoutingTable = state.getRoutingTable().index(shardId.getIndex());
+        IndexRoutingTable indexRoutingTable = state.getRoutingTable().index(shardId.getIndexName());
         if (indexRoutingTable != null) {
             IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(shardId.getId());
             if (shardRoutingTable != null) {
@@ -702,7 +724,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
     }
 
     protected Releasable getIndexShardOperationsCounter(ShardId shardId) {
-        IndexService indexService = indicesService.indexServiceSafe(shardId.index().getName());
+        IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
         IndexShard indexShard = indexService.getShard(shardId.id());
         return new IndexShardReference(indexShard);
     }
@@ -941,9 +963,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
             failuresArray = new ReplicationResponse.ShardInfo.Failure[shardReplicaFailures.size()];
             for (Map.Entry<String, Throwable> entry : shardReplicaFailures.entrySet()) {
                 RestStatus restStatus = ExceptionsHelper.status(entry.getValue());
-                failuresArray[slot++] = new ReplicationResponse.ShardInfo.Failure(
-                        shardId.getIndex(), shardId.getId(), entry.getKey(), entry.getValue(), restStatus, false
-                );
+                failuresArray[slot++] = new ReplicationResponse.ShardInfo.Failure(shardId, entry.getKey(), entry.getValue(), restStatus, false);
             }
         } else {
             failuresArray = ReplicationResponse.EMPTY;
@@ -124,7 +124,7 @@ public abstract class TransportInstanceSingleOperationAction<Request extends Ins
         }
 
         public void start() {
-            this.observer = new ClusterStateObserver(clusterService, request.timeout(), logger);
+            this.observer = new ClusterStateObserver(clusterService, request.timeout(), logger, threadPool.getThreadContext());
             doStart();
         }
 
@@ -143,7 +143,7 @@ public abstract class TransportInstanceSingleOperationAction<Request extends Ins
             request.concreteIndex(indexNameExpressionResolver.concreteSingleIndex(observer.observedState(), request));
             // check if we need to execute, and if not, return
             if (!resolveRequest(observer.observedState(), request, listener)) {
-                listener.onFailure(new IllegalStateException(LoggerMessageFormat.format("{} request {} could not be resolved", new ShardId(request.index, request.shardId), actionName)));
+                listener.onFailure(new IllegalStateException(LoggerMessageFormat.format("[{}][{}] request {} could not be resolved",request.index, request.shardId, actionName)));
                 return;
             }
             blockException = checkRequestBlock(observer.observedState(), request);
@@ -217,7 +217,7 @@ public abstract class TransportInstanceSingleOperationAction<Request extends Ins
                 Throwable listenFailure = failure;
                 if (listenFailure == null) {
                     if (shardIt == null) {
-                        listenFailure = new UnavailableShardsException(new ShardId(request.concreteIndex(), -1), "Timeout waiting for [{}], request: {}", request.timeout(), actionName);
+                        listenFailure = new UnavailableShardsException(request.concreteIndex(), -1, "Timeout waiting for [{}], request: {}", request.timeout(), actionName);
                     } else {
                         listenFailure = new UnavailableShardsException(shardIt.shardId(), "[{}] shardIt, [{}] active : Timeout waiting for [{}], request: {}", shardIt.size(), shardIt.sizeActive(), request.timeout(), actionName);
                     }
@@ -56,15 +56,6 @@ public abstract class SingleShardRequest<Request extends SingleShardRequest<Requ
         this.index = index;
     }
 
-    protected SingleShardRequest(ActionRequest<?> request) {
-        super(request);
-    }
-
-    protected SingleShardRequest(ActionRequest<?> request, String index) {
-        super(request);
-        this.index = index;
-    }
-
     /**
      * @return a validation exception if the index property hasn't been set
     */
@@ -26,7 +26,6 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.tasks.ChildTask;
 import org.elasticsearch.tasks.Task;
 
 import java.io.IOException;
@@ -61,15 +60,6 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
         return null;
     }
 
-    /**
-     * Get information about tasks from nodes based on the nodes ids specified.
-     * If none are passed, information for all nodes will be returned.
-     */
-    public BaseTasksRequest(ActionRequest<?> request, String... nodesIds) {
-        super(request);
-        this.nodesIds = nodesIds;
-    }
-
     /**
      * Get information about tasks from nodes based on the nodes ids specified.
      * If none are passed, information for all nodes will be returned.
@@ -173,20 +163,13 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
         if (actions() != null && actions().length > 0 && Regex.simpleMatch(actions(), task.getAction()) == false) {
             return false;
         }
-        if (parentNode() != null || parentTaskId() != BaseTasksRequest.ALL_TASKS) {
-            if (task instanceof ChildTask) {
-                if (parentNode() != null) {
-                    if (parentNode().equals(((ChildTask) task).getParentNode()) == false) {
-                        return false;
-                    }
-                }
-                if (parentTaskId() != BaseTasksRequest.ALL_TASKS) {
-                    if (parentTaskId() != ((ChildTask) task).getParentId()) {
-                        return false;
-                    }
-                }
-            } else {
-                // This is not a child task and we need to match parent node or id
+        if (parentNode() != null) {
+            if (parentNode().equals(task.getParentNode()) == false) {
+                return false;
+            }
+        }
+        if (parentTaskId() != BaseTasksRequest.ALL_TASKS) {
+            if (parentTaskId() != task.getParentId()) {
                 return false;
             }
         }
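With ChildTask folded into Task, every task can answer getParentNode() and getParentId() directly, which is why the match logic above loses its instanceof branch. The filtering rule that remains is easy to restate in isolation; the sketch below uses hypothetical minimal types, with ALL_TASKS as the "no filter" sentinel:

    class TaskMatchSketch {
        static final long ALL_TASKS = -1L;

        static final class Task {
            final String parentNode; final long parentId;
            Task(String parentNode, long parentId) { this.parentNode = parentNode; this.parentId = parentId; }
            String getParentNode() { return parentNode; }
            long getParentId() { return parentId; }
        }

        // Mirrors the simplified filter: reject tasks whose parent coordinates do not match the request.
        static boolean match(String parentNode, long parentTaskId, Task task) {
            if (parentNode != null && parentNode.equals(task.getParentNode()) == false) {
                return false;
            }
            if (parentTaskId != ALL_TASKS && parentTaskId != task.getParentId()) {
                return false;
            }
            return true;
        }

        public static void main(String[] args) {
            Task child = new Task("node-1", 42L);
            System.out.println(match("node-1", 42L, child));       // true
            System.out.println(match("node-2", ALL_TASKS, child)); // false
        }
    }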
@@ -291,7 +291,7 @@ public abstract class TransportTasksAction<
         }
 
         protected NodeTaskRequest(TasksRequest tasksRequest) {
-            super(tasksRequest);
+            super();
             this.tasksRequest = tasksRequest;
         }
 
@@ -41,8 +41,8 @@ public class MultiTermVectorsShardRequest extends SingleShardRequest<MultiTermVe
 
     }
 
-    MultiTermVectorsShardRequest(MultiTermVectorsRequest request, String index, int shardId) {
-        super(request, index);
+    MultiTermVectorsShardRequest(String index, int shardId) {
+        super(index);
         this.shardId = shardId;
         locations = new IntArrayList();
         requests = new ArrayList<>();
@@ -82,7 +82,7 @@ public class TransportMultiTermVectorsAction extends HandledTransportAction<Mult
                     termVectorsRequest.id(), termVectorsRequest.routing());
             MultiTermVectorsShardRequest shardRequest = shardRequests.get(shardId);
             if (shardRequest == null) {
-                shardRequest = new MultiTermVectorsShardRequest(request, shardId.index().name(), shardId.id());
+                shardRequest = new MultiTermVectorsShardRequest(shardId.getIndexName(), shardId.id());
                 shardRequest.preference(request.preference);
                 shardRequests.put(shardId, shardRequest);
             }
@@ -39,6 +39,7 @@ import org.elasticsearch.search.SearchService;
 import org.elasticsearch.search.controller.SearchPhaseController;
 import org.elasticsearch.search.dfs.AggregatedDfs;
 import org.elasticsearch.search.dfs.DfsSearchResult;
+import org.elasticsearch.tasks.Task;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 
@@ -69,14 +70,14 @@ public class TransportDfsOnlyAction extends TransportBroadcastAction<DfsOnlyRequ
     }
 
     @Override
-    protected void doExecute(DfsOnlyRequest request, ActionListener<DfsOnlyResponse> listener) {
+    protected void doExecute(Task task, DfsOnlyRequest request, ActionListener<DfsOnlyResponse> listener) {
         request.nowInMillis = System.currentTimeMillis();
-        super.doExecute(request, listener);
+        super.doExecute(task, request, listener);
     }
 
     @Override
     protected ShardDfsOnlyRequest newShardRequest(int numShards, ShardRouting shard, DfsOnlyRequest request) {
-        String[] filteringAliases = indexNameExpressionResolver.filteringAliases(clusterService.state(), shard.index(), request.indices());
+        String[] filteringAliases = indexNameExpressionResolver.filteringAliases(clusterService.state(), shard.index().getName(), request.indices());
         return new ShardDfsOnlyRequest(shard, numShards, filteringAliases, request.nowInMillis, request);
     }
 
@@ -113,7 +113,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
     protected void doExecute(final UpdateRequest request, final ActionListener<UpdateResponse> listener) {
         // if we don't have a master, we don't have metadata, that's fine, let it find a master using create index API
         if (autoCreateIndex.shouldAutoCreate(request.index(), clusterService.state())) {
-            createIndexAction.execute(new CreateIndexRequest(request).index(request.index()).cause("auto(update api)").masterNodeTimeout(request.timeout()), new ActionListener<CreateIndexResponse>() {
+            createIndexAction.execute(new CreateIndexRequest().index(request.index()).cause("auto(update api)").masterNodeTimeout(request.timeout()), new ActionListener<CreateIndexResponse>() {
                 @Override
                 public void onResponse(CreateIndexResponse result) {
                     innerExecute(request, listener);
@@ -164,12 +164,12 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
     }
 
     protected void shardOperation(final UpdateRequest request, final ActionListener<UpdateResponse> listener, final int retryCount) {
-        IndexService indexService = indicesService.indexServiceSafe(request.concreteIndex());
-        IndexShard indexShard = indexService.getShard(request.shardId());
+        final IndexService indexService = indicesService.indexServiceSafe(request.concreteIndex());
+        final IndexShard indexShard = indexService.getShard(request.shardId());
         final UpdateHelper.Result result = updateHelper.prepare(request, indexShard);
         switch (result.operation()) {
             case UPSERT:
-                IndexRequest upsertRequest = new IndexRequest(result.action(), request);
+                IndexRequest upsertRequest = new IndexRequest((IndexRequest)result.action());
                 // we fetch it from the index request so we don't generate the bytes twice, its already done in the index request
                 final BytesReference upsertSourceBytes = upsertRequest.source();
                 indexAction.execute(upsertRequest, new ActionListener<IndexResponse>() {
|
|||||||
});
|
});
|
||||||
break;
|
break;
|
||||||
case INDEX:
|
case INDEX:
|
||||||
IndexRequest indexRequest = new IndexRequest(result.action(), request);
|
IndexRequest indexRequest = new IndexRequest((IndexRequest)result.action());
|
||||||
// we fetch it from the index request so we don't generate the bytes twice, its already done in the index request
|
// we fetch it from the index request so we don't generate the bytes twice, its already done in the index request
|
||||||
final BytesReference indexSourceBytes = indexRequest.source();
|
final BytesReference indexSourceBytes = indexRequest.source();
|
||||||
indexAction.execute(indexRequest, new ActionListener<IndexResponse>() {
|
indexAction.execute(indexRequest, new ActionListener<IndexResponse>() {
|
||||||
|
@@ -44,6 +44,7 @@ import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.script.ExecutableScript;
+import org.elasticsearch.script.Script;
 import org.elasticsearch.script.ScriptContext;
 import org.elasticsearch.script.ScriptService;
 import org.elasticsearch.search.fetch.source.FetchSourceContext;
@@ -75,16 +76,15 @@ public class UpdateHelper extends AbstractComponent {
         final GetResult getResult = indexShard.getService().get(request.type(), request.id(),
                 new String[]{RoutingFieldMapper.NAME, ParentFieldMapper.NAME, TTLFieldMapper.NAME, TimestampFieldMapper.NAME},
                 true, request.version(), request.versionType(), FetchSourceContext.FETCH_SOURCE, false);
-        return prepare(request, getResult);
+        return prepare(indexShard.shardId(), request, getResult);
     }
 
     /**
      * Prepares an update request by converting it into an index or delete request or an update response (no action).
      */
     @SuppressWarnings("unchecked")
-    protected Result prepare(UpdateRequest request, final GetResult getResult) {
+    protected Result prepare(ShardId shardId, UpdateRequest request, final GetResult getResult) {
         long getDateNS = System.nanoTime();
-        final ShardId shardId = new ShardId(getResult.getIndex(), request.shardId());
         if (!getResult.isExists()) {
             if (request.upsertRequest() == null && !request.docAsUpsert()) {
                 throw new DocumentMissingException(shardId, request.type(), request.id());
@@ -99,7 +99,7 @@ public class UpdateHelper extends AbstractComponent {
             // Tell the script that this is a create and not an update
             ctx.put("op", "create");
             ctx.put("_source", upsertDoc);
-            ctx = executeScript(request, ctx);
+            ctx = executeScript(request.script, ctx);
             //Allow the script to set TTL using ctx._ttl
             if (ttl == null) {
                 ttl = getTTLFromScriptContext(ctx);
@@ -193,7 +193,7 @@ public class UpdateHelper extends AbstractComponent {
             ctx.put("_ttl", originalTtl);
             ctx.put("_source", sourceAndContent.v2());
 
-            ctx = executeScript(request, ctx);
+            ctx = executeScript(request.script, ctx);
 
             operation = (String) ctx.get("op");
 
@@ -243,14 +243,14 @@ public class UpdateHelper extends AbstractComponent {
         }
     }
 
-    private Map<String, Object> executeScript(UpdateRequest request, Map<String, Object> ctx) {
+    private Map<String, Object> executeScript(Script script, Map<String, Object> ctx) {
         try {
             if (scriptService != null) {
-                ExecutableScript script = scriptService.executable(request.script, ScriptContext.Standard.UPDATE, request, Collections.emptyMap());
-                script.setNextVar("ctx", ctx);
-                script.run();
+                ExecutableScript executableScript = scriptService.executable(script, ScriptContext.Standard.UPDATE, Collections.emptyMap());
+                executableScript.setNextVar("ctx", ctx);
+                executableScript.run();
                 // we need to unwrap the ctx...
-                ctx = (Map<String, Object>) script.unwrap(ctx);
+                ctx = (Map<String, Object>) executableScript.unwrap(ctx);
             }
         } catch (Exception e) {
             throw new IllegalArgumentException("failed to execute script", e);
@@ -171,7 +171,7 @@ final class Bootstrap {
         // placeholder
         Settings nodeSettings = Settings.settingsBuilder()
                 .put(settings)
-                .put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING, true)
+                .put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING.getKey(), true)
                 .build();
 
         node = new Node(nodeSettings);
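The hunk above reflects that bootstrap settings are now declared as typed Setting objects, so Settings.Builder.put(...) needs the raw string key via getKey(). A minimal sketch of that round trip, using a hypothetical flag (my.flag is not part of this commit) and mirroring the Setting constructor that appears later in this diff:

    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Settings;

    final class SettingKeySketch {
        // Hypothetical flag for illustration only; not a setting from this commit.
        static final Setting<Boolean> SOME_FLAG =
                new Setting<>("my.flag", "false", Boolean::parseBoolean, false, Setting.Scope.CLUSTER);

        static Settings build() {
            return Settings.settingsBuilder()
                    .put(SOME_FLAG.getKey(), true)   // the builder works on raw string keys, hence getKey()
                    .build();
        }

        static boolean read(Settings settings) {
            return SOME_FLAG.get(settings);          // the typed read goes back through the Setting object
        }
    }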
@@ -288,6 +288,9 @@ final class Bootstrap {
         // fail if using broken version
         JVMCheck.check();
 
+        // fail if somebody replaced the lucene jars
+        checkLucene();
+
         INSTANCE.setup(true, settings, environment);
 
         INSTANCE.start();
|
|||||||
private static void exit(int status) {
|
private static void exit(int status) {
|
||||||
System.exit(status);
|
System.exit(status);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private static void checkLucene() {
|
||||||
|
if (Version.CURRENT.luceneVersion.equals(org.apache.lucene.util.Version.LATEST) == false) {
|
||||||
|
throw new AssertionError("Lucene version mismatch this version of Elasticsearch requires lucene version ["
|
||||||
|
+ Version.CURRENT.luceneVersion + "] but the current lucene version is [" + org.apache.lucene.util.Version.LATEST + "]");
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
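For readers outside the Bootstrap context, a self-contained sketch of the same kind of guard checkLucene() adds; the expected version is passed in here as an assumption, whereas the code above reads it from Version.CURRENT.luceneVersion:

    import org.apache.lucene.util.Version;

    final class LuceneJarCheck {
        // `expected` stands in for Version.CURRENT.luceneVersion from the hunk above.
        static void check(Version expected) {
            // Version.LATEST reflects the lucene-core jar actually present on the classpath,
            // so a mismatch means the jars were swapped out from under the build.
            if (expected.equals(Version.LATEST) == false) {
                throw new AssertionError("Lucene version mismatch: this build requires [" + expected
                        + "] but the classpath provides [" + Version.LATEST + "]");
            }
        }
    }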
@@ -27,7 +27,7 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.http.netty.NettyHttpServerTransport;
 import org.elasticsearch.plugins.PluginInfo;
-import org.elasticsearch.transport.netty.NettyTransport;
+import org.elasticsearch.transport.TransportSettings;
 
 import java.io.FilePermission;
 import java.io.IOException;
@@ -277,10 +277,10 @@ final class Security {
         // see SocketPermission implies() code
         policy.add(new SocketPermission("*:" + httpRange, "listen,resolve"));
         // transport is waaaay overengineered
-        Map<String, Settings> profiles = settings.getGroups("transport.profiles", true);
-        if (!profiles.containsKey(NettyTransport.DEFAULT_PROFILE)) {
+        Map<String, Settings> profiles = TransportSettings.TRANSPORT_PROFILES_SETTING.get(settings).getAsGroups();
+        if (!profiles.containsKey(TransportSettings.DEFAULT_PROFILE)) {
             profiles = new HashMap<>(profiles);
-            profiles.put(NettyTransport.DEFAULT_PROFILE, Settings.EMPTY);
+            profiles.put(TransportSettings.DEFAULT_PROFILE, Settings.EMPTY);
         }
 
         // loop through all profiles and add permissions for each one, if its valid.
@@ -288,12 +288,10 @@ final class Security {
         for (Map.Entry<String, Settings> entry : profiles.entrySet()) {
             Settings profileSettings = entry.getValue();
             String name = entry.getKey();
-            String transportRange = profileSettings.get("port",
-                    settings.get("transport.tcp.port",
-                            NettyTransport.DEFAULT_PORT_RANGE));
+            String transportRange = profileSettings.get("port", TransportSettings.PORT.get(settings));
 
             // a profile is only valid if its the default profile, or if it has an actual name and specifies a port
-            boolean valid = NettyTransport.DEFAULT_PROFILE.equals(name) || (Strings.hasLength(name) && profileSettings.get("port") != null);
+            boolean valid = TransportSettings.DEFAULT_PROFILE.equals(name) || (Strings.hasLength(name) && profileSettings.get("port") != null);
             if (valid) {
                 // listen is always called with 'localhost' but use wildcard to be sure, no name service is consulted.
                 // see SocketPermission implies() code
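The hunk above folds the old two-level raw-key lookup into the typed TransportSettings.PORT setting. Spelled out with plain Settings calls, the fallback it encodes looks roughly like the sketch below; the literal default range is an assumption for illustration, the real default lives in TransportSettings.PORT:

    import org.elasticsearch.common.settings.Settings;

    final class TransportRangeSketch {
        static String resolveRange(Settings profileSettings, Settings nodeSettings) {
            // node-wide transport port range, falling back to an assumed default
            String nodeWide = nodeSettings.get("transport.tcp.port", "9300-9400");
            // a per-profile "port" wins over the node-wide range
            return profileSettings.get("port", nodeWide);
        }
    }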
@@ -19,8 +19,12 @@
 
 package org.elasticsearch.client;
 
+import org.elasticsearch.action.Action;
 import org.elasticsearch.action.ActionFuture;
 import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.action.bulk.BulkRequest;
 import org.elasticsearch.action.bulk.BulkRequestBuilder;
 import org.elasticsearch.action.bulk.BulkResponse;
@@ -80,11 +84,13 @@ import org.elasticsearch.action.termvectors.TermVectorsResponse;
 import org.elasticsearch.action.update.UpdateRequest;
 import org.elasticsearch.action.update.UpdateRequestBuilder;
 import org.elasticsearch.action.update.UpdateResponse;
-import org.elasticsearch.client.support.Headers;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.lease.Releasable;
+import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
 
+import java.util.Map;
+
 /**
  * A client provides a one stop interface for performing actions/operations against the cluster.
  * <p>
@@ -100,7 +106,15 @@ import org.elasticsearch.common.settings.Settings;
  */
 public interface Client extends ElasticsearchClient, Releasable {
 
-    String CLIENT_TYPE_SETTING = "client.type";
+    Setting<String> CLIENT_TYPE_SETTING_S = new Setting<>("client.type", "node", (s) -> {
+        switch (s) {
+            case "node":
+            case "transport":
+                return s;
+            default:
+                throw new IllegalArgumentException("Can't parse [client.type] must be one of [node, transport]");
+        }
+    }, false, Setting.Scope.CLUSTER);
 
     /**
      * The admin client that can be used to perform administrative operations.
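Since client.type is now a parsed Setting rather than a bare key, reading it through the Setting both applies the default and validates the value. A small sketch of how a caller might resolve it; the settings values below are examples, not taken from this diff:

    import org.elasticsearch.client.Client;
    import org.elasticsearch.common.settings.Settings;

    final class ClientTypeSketch {
        static String resolve(Settings settings) {
            // A missing key falls back to the declared default "node"; "transport" passes the
            // parser above; any other value makes the parser throw IllegalArgumentException.
            return Client.CLIENT_TYPE_SETTING_S.get(settings);
        }

        public static void main(String[] args) {
            Settings transport = Settings.settingsBuilder().put("client.type", "transport").build();
            System.out.println(resolve(transport));          // -> transport
            System.out.println(resolve(Settings.EMPTY));     // -> node (default)
        }
    }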
@@ -597,5 +611,9 @@ public interface Client extends ElasticsearchClient, Releasable {
      */
     Settings settings();
 
-    Headers headers();
+    /**
+     * Returns a new lightweight Client that applies all given headers to each of the requests
+     * issued from it.
+     */
+    Client filterWithHeader(Map<String, String> headers);
 }
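With headers() removed from the Client interface, per-request headers are attached by wrapping the client, as the new Javadoc above describes. A usage sketch; the header name and value are placeholders:

    import java.util.Collections;
    import org.elasticsearch.client.Client;

    final class HeaderedClientSketch {
        static Client withAuth(Client client) {
            // Every request issued through the returned client carries this header.
            return client.filterWithHeader(Collections.singletonMap("Authorization", "Basic dXNlcjpwYXNz"));
        }
    }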
@@ -42,7 +42,7 @@ public abstract class FilterClient extends AbstractClient {
      * @see #in()
      */
     public FilterClient(Client in) {
-        super(in.settings(), in.threadPool(), in.headers());
+        super(in.settings(), in.threadPool());
         this.in = in;
     }
 