Merge branch 'master' into dots2

Ryan Ernst 2016-03-10 09:22:31 -08:00
commit ac73d97169
158 changed files with 2881 additions and 1688 deletions

==== NodeStats.java ====

@@ -31,6 +31,7 @@ import org.elasticsearch.discovery.DiscoveryStats;
 import org.elasticsearch.http.HttpStats;
 import org.elasticsearch.indices.NodeIndicesStats;
 import org.elasticsearch.indices.breaker.AllCircuitBreakerStats;
+import org.elasticsearch.ingest.IngestStats;
 import org.elasticsearch.monitor.fs.FsInfo;
 import org.elasticsearch.monitor.jvm.JvmStats;
 import org.elasticsearch.monitor.os.OsStats;
@@ -81,6 +82,9 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
     @Nullable
     private DiscoveryStats discoveryStats;
 
+    @Nullable
+    private IngestStats ingestStats;
+
     NodeStats() {
     }
@@ -89,7 +93,8 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
                      @Nullable FsInfo fs, @Nullable TransportStats transport, @Nullable HttpStats http,
                      @Nullable AllCircuitBreakerStats breaker,
                      @Nullable ScriptStats scriptStats,
-                     @Nullable DiscoveryStats discoveryStats) {
+                     @Nullable DiscoveryStats discoveryStats,
+                     @Nullable IngestStats ingestStats) {
         super(node);
         this.timestamp = timestamp;
         this.indices = indices;
@@ -103,6 +108,7 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
         this.breaker = breaker;
         this.scriptStats = scriptStats;
         this.discoveryStats = discoveryStats;
+        this.ingestStats = ingestStats;
     }
 
     public long getTimestamp() {
@@ -187,6 +193,11 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
         return this.discoveryStats;
     }
 
+    @Nullable
+    public IngestStats getIngestStats() {
+        return ingestStats;
+    }
+
     public static NodeStats readNodeStats(StreamInput in) throws IOException {
         NodeStats nodeInfo = new NodeStats();
         nodeInfo.readFrom(in);
@@ -224,7 +235,7 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
         breaker = AllCircuitBreakerStats.readOptionalAllCircuitBreakerStats(in);
         scriptStats = in.readOptionalStreamable(ScriptStats::new);
         discoveryStats = in.readOptionalStreamable(() -> new DiscoveryStats(null));
+        ingestStats = in.readOptionalWritable(IngestStats.PROTO);
     }
 
     @Override
@@ -282,6 +293,7 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
         out.writeOptionalStreamable(breaker);
         out.writeOptionalStreamable(scriptStats);
         out.writeOptionalStreamable(discoveryStats);
+        out.writeOptionalWriteable(ingestStats);
     }
 
     @Override
@@ -337,6 +349,10 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
             getDiscoveryStats().toXContent(builder, params);
         }
+        if (getIngestStats() != null) {
+            getIngestStats().toXContent(builder, params);
+        }
         return builder;
     }
 }

==== NodesStatsRequest.java ====

@@ -42,6 +42,7 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
     private boolean breaker;
     private boolean script;
     private boolean discovery;
+    private boolean ingest;
 
     public NodesStatsRequest() {
     }
@@ -69,6 +70,7 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
         this.breaker = true;
         this.script = true;
         this.discovery = true;
+        this.ingest = true;
         return this;
     }
@@ -87,6 +89,7 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
         this.breaker = false;
         this.script = false;
         this.discovery = false;
+        this.ingest = false;
         return this;
     }
@@ -250,6 +253,17 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
         return this;
     }
 
+    public boolean ingest() {
+        return ingest;
+    }
+
+    /**
+     * Should ingest statistics be returned.
+     */
+    public NodesStatsRequest ingest(boolean ingest) {
+        this.ingest = ingest;
+        return this;
+    }
+
     @Override
     public void readFrom(StreamInput in) throws IOException {
@@ -265,6 +279,7 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
         breaker = in.readBoolean();
         script = in.readBoolean();
         discovery = in.readBoolean();
+        ingest = in.readBoolean();
     }
 
     @Override
@@ -281,6 +296,7 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
         out.writeBoolean(breaker);
         out.writeBoolean(script);
         out.writeBoolean(discovery);
+        out.writeBoolean(ingest);
     }
 }

==== NodesStatsRequestBuilder.java ====

@@ -137,4 +137,12 @@ public class NodesStatsRequestBuilder extends NodesOperationRequestBuilder<Nodes
         request.discovery(discovery);
         return this;
     }
+
+    /**
+     * Should ingest statistics be returned.
+     */
+    public NodesStatsRequestBuilder ingest(boolean ingest) {
+        request.ingest(ingest);
+        return this;
+    }
 }
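
Taken together with the request changes above, callers can now opt in to the new statistics. A
hypothetical usage sketch (only ingest(boolean) is introduced by this diff; the clear() call and
the node id are assumptions based on the surrounding hunks):

    // fetch only ingest stats from one node (sketch, not part of the commit)
    NodesStatsRequest request = new NodesStatsRequest("node-1")
            .clear()       // assumed: the "set every flag to false" method in the @@ -87,6 hunk
            .ingest(true); // new in this commit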

==== TransportNodesStatsAction.java ====

@@ -80,7 +80,8 @@ public class TransportNodesStatsAction extends TransportNodesAction<NodesStatsRe
     protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest) {
         NodesStatsRequest request = nodeStatsRequest.request;
         return nodeService.stats(request.indices(), request.os(), request.process(), request.jvm(), request.threadPool(),
-                request.fs(), request.transport(), request.http(), request.breaker(), request.script(), request.discovery());
+                request.fs(), request.transport(), request.http(), request.breaker(), request.script(), request.discovery(),
+                request.ingest());
     }
 
     @Override

==== TransportClusterStatsAction.java ====

@@ -99,7 +99,7 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta
     @Override
     protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeRequest) {
         NodeInfo nodeInfo = nodeService.info(false, true, false, true, false, true, false, true, false);
-        NodeStats nodeStats = nodeService.stats(CommonStatsFlags.NONE, false, true, true, false, true, false, false, false, false, false);
+        NodeStats nodeStats = nodeService.stats(CommonStatsFlags.NONE, false, true, true, false, true, false, false, false, false, false, false);
         List<ShardStats> shardsStats = new ArrayList<>();
         for (IndexService indexService : indicesService) {
             for (IndexShard indexShard : indexService) {

==== TransportClearIndicesCacheAction.java ====

@@ -77,7 +77,7 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAc
     @Override
     protected EmptyResult shardOperation(ClearIndicesCacheRequest request, ShardRouting shardRouting) {
-        IndexService service = indicesService.indexService(shardRouting.getIndexName());
+        IndexService service = indicesService.indexService(shardRouting.index());
         if (service != null) {
             IndexShard shard = service.getShardOrNull(shardRouting.id());
             boolean clearedAtLeastOne = false;

==== TransportIndicesSegmentsAction.java ====

@@ -93,7 +93,7 @@ public class TransportIndicesSegmentsAction extends TransportBroadcastByNodeActi
     @Override
     protected ShardSegments shardOperation(IndicesSegmentsRequest request, ShardRouting shardRouting) {
-        IndexService indexService = indicesService.indexServiceSafe(shardRouting.getIndexName());
+        IndexService indexService = indicesService.indexServiceSafe(shardRouting.index());
         IndexShard indexShard = indexService.getShard(shardRouting.id());
         return new ShardSegments(indexShard.routingEntry(), indexShard.segments(request.verbose()));
     }

==== TransportShardBulkAction.java ====

@@ -47,6 +47,7 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.VersionType;
 import org.elasticsearch.index.engine.Engine;
@@ -104,8 +105,9 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
     @Override
     protected Tuple<BulkShardResponse, BulkShardRequest> shardOperationOnPrimary(MetaData metaData, BulkShardRequest request) {
-        final IndexService indexService = indicesService.indexServiceSafe(request.index());
-        final IndexShard indexShard = indexService.getShard(request.shardId().id());
+        ShardId shardId = request.shardId();
+        final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
+        final IndexShard indexShard = indexService.getShard(shardId.getId());
         long[] preVersions = new long[request.items().length];
         VersionType[] preVersionTypes = new VersionType[request.items().length];

==== IngestActionFilter.java ====

@@ -112,7 +112,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
                 logger.error("failed to execute pipeline for a bulk request", throwable);
                 listener.onFailure(throwable);
             } else {
-                long ingestTookInMillis = TimeUnit.MILLISECONDS.convert(System.nanoTime() - ingestStartTimeInNanos, TimeUnit.NANOSECONDS);
+                long ingestTookInMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - ingestStartTimeInNanos);
                 BulkRequest bulkRequest = bulkRequestModifier.getBulkRequest();
                 ActionListener<BulkResponse> actionListener = bulkRequestModifier.wrapActionListenerIfNeeded(ingestTookInMillis, listener);
                 if (bulkRequest.requests().isEmpty()) {
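
The two forms above are equivalent: TimeUnit.NANOSECONDS.toMillis(n) is simply the direct
spelling of TimeUnit.MILLISECONDS.convert(n, TimeUnit.NANOSECONDS). A standalone check:

    long elapsedNanos = 1_500_000L; // 1.5 ms of elapsed time
    assert TimeUnit.MILLISECONDS.convert(elapsedNanos, TimeUnit.NANOSECONDS) == 1L;
    assert TimeUnit.NANOSECONDS.toMillis(elapsedNanos) == 1L; // same truncating conversion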

==== ShardSearchFailure.java ====

@@ -32,8 +32,6 @@ import org.elasticsearch.search.SearchShardTarget;
 
 import java.io.IOException;
 
-import static org.elasticsearch.search.SearchShardTarget.readSearchShardTarget;
-
 /**
  * Represents a failure to search on a specific shard.
  */
@@ -106,7 +104,7 @@ public class ShardSearchFailure implements ShardOperationFailedException {
     @Override
     public int shardId() {
         if (shardTarget != null) {
-            return shardTarget.shardId();
+            return shardTarget.shardId().id();
         }
         return -1;
     }
@@ -133,7 +131,7 @@ public class ShardSearchFailure implements ShardOperationFailedException {
     @Override
     public void readFrom(StreamInput in) throws IOException {
         if (in.readBoolean()) {
-            shardTarget = readSearchShardTarget(in);
+            shardTarget = new SearchShardTarget(in);
         }
         reason = in.readString();
         status = RestStatus.readFrom(in);

==== TransportSuggestAction.java ====

@@ -143,7 +143,7 @@ public class TransportSuggestAction extends TransportBroadcastAction<SuggestRequ
                 throw new IllegalArgumentException("suggest content missing");
             }
             final SuggestionSearchContext context = suggestPhase.parseElement().parseInternal(parser, indexService.mapperService(),
-                    indexService.fieldData(), request.shardId().getIndexName(), request.shardId().id());
+                    indexService.fieldData(), request.shardId());
             final Suggest result = suggestPhase.execute(context, searcher.searcher());
             return new ShardSuggestResponse(request.shardId(), result);
         }

==== TransportReplicationAction.java ====

@@ -53,6 +53,7 @@ import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.concurrent.AbstractRunnable;
 import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.engine.VersionConflictEngineException;
 import org.elasticsearch.index.shard.IndexShard;
@@ -372,18 +373,18 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
         }
 
         private void failReplicaIfNeeded(Throwable t) {
-            String index = request.shardId().getIndex().getName();
+            Index index = request.shardId().getIndex();
             int shardId = request.shardId().id();
             logger.trace("failure on replica [{}][{}], action [{}], request [{}]", t, index, shardId, actionName, request);
             if (ignoreReplicaException(t) == false) {
                 IndexService indexService = indicesService.indexService(index);
                 if (indexService == null) {
-                    logger.debug("ignoring failed replica [{}][{}] because index was already removed.", index, shardId);
+                    logger.debug("ignoring failed replica {}[{}] because index was already removed.", index, shardId);
                     return;
                 }
                 IndexShard indexShard = indexService.getShardOrNull(shardId);
                 if (indexShard == null) {
-                    logger.debug("ignoring failed replica [{}][{}] because index was already removed.", index, shardId);
+                    logger.debug("ignoring failed replica {}[{}] because index was already removed.", index, shardId);
                     return;
                 }
                 indexShard.failShard(actionName + " failed on replica", t);

==== InstanceShardOperationRequest.java ====

@@ -27,6 +27,7 @@ import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.shard.ShardId;
 
 import java.io.IOException;
 import java.util.concurrent.TimeUnit;
@@ -42,8 +43,8 @@ public abstract class InstanceShardOperationRequest<Request extends InstanceShar
     protected TimeValue timeout = DEFAULT_TIMEOUT;
 
     protected String index;
-    // -1 means its not set, allows to explicitly direct a request to a specific shard
-    protected int shardId = -1;
+    // null means its not set, allows to explicitly direct a request to a specific shard
+    protected ShardId shardId = null;
 
     private String concreteIndex;
@@ -115,7 +116,11 @@ public abstract class InstanceShardOperationRequest<Request extends InstanceShar
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
         index = in.readString();
-        shardId = in.readInt();
+        if (in.readBoolean()) {
+            shardId = ShardId.readShardId(in);
+        } else {
+            shardId = null;
+        }
         timeout = TimeValue.readTimeValue(in);
         concreteIndex = in.readOptionalString();
     }
@@ -124,7 +129,7 @@ public abstract class InstanceShardOperationRequest<Request extends InstanceShar
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
         out.writeString(index);
-        out.writeInt(shardId);
+        out.writeOptionalStreamable(shardId);
         timeout.writeTo(out);
         out.writeOptionalString(concreteIndex);
     }
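
The serialization change above replaces the old "-1 means unset" int with an optional ShardId. A
standalone sketch of the resulting wire layout (plain java.io streams as a stand-in for
Elasticsearch's StreamInput/StreamOutput; the ShardId fields are simplified):

    import java.io.*;

    class OptionalShardIdSketch {
        static final class ShardIdStub { String index; int id; } // simplified stand-in for ShardId

        // a presence flag precedes the value, mirroring writeOptionalStreamable / the readBoolean above
        static void write(DataOutput out, ShardIdStub shardId) throws IOException {
            out.writeBoolean(shardId != null);
            if (shardId != null) {
                out.writeUTF(shardId.index);
                out.writeInt(shardId.id);
            }
        }

        static ShardIdStub read(DataInput in) throws IOException {
            if (in.readBoolean() == false) {
                return null; // absent; no sentinel value needed anymore
            }
            ShardIdStub shardId = new ShardIdStub();
            shardId.index = in.readUTF();
            shardId.id = in.readInt();
            return shardId;
        }
    }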

==== TransportInstanceSingleOperationAction.java ====

@@ -172,7 +172,7 @@ public abstract class TransportInstanceSingleOperationAction<Request extends Ins
                 return;
             }
-            request.shardId = shardIt.shardId().id();
+            request.shardId = shardIt.shardId();
             DiscoveryNode node = nodes.get(shard.currentNodeId());
             transportService.sendRequest(node, shardActionName, request, transportOptions(), new BaseTransportResponseHandler<Response>() {

==== TransportShardMultiTermsVectorAction.java ====

@@ -75,12 +75,12 @@ public class TransportShardMultiTermsVectorAction extends TransportSingleShardAc
     @Override
     protected MultiTermVectorsShardResponse shardOperation(MultiTermVectorsShardRequest request, ShardId shardId) {
-        MultiTermVectorsShardResponse response = new MultiTermVectorsShardResponse();
+        final MultiTermVectorsShardResponse response = new MultiTermVectorsShardResponse();
+        final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
+        final IndexShard indexShard = indexService.getShard(shardId.id());
         for (int i = 0; i < request.locations.size(); i++) {
             TermVectorsRequest termVectorsRequest = request.requests.get(i);
             try {
-                IndexService indexService = indicesService.indexServiceSafe(request.index());
-                IndexShard indexShard = indexService.getShard(shardId.id());
                 TermVectorsResponse termVectorsResponse = TermVectorsService.getTermVectors(indexShard, termVectorsRequest);
                 termVectorsResponse.updateTookInMillis(termVectorsRequest.startTime());
                 response.add(request.locations.get(i), termVectorsResponse);

==== TransportUpdateAction.java ====

@@ -51,6 +51,7 @@ import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.engine.VersionConflictEngineException;
 import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.indices.IndexAlreadyExistsException;
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -147,8 +148,8 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
     @Override
     protected ShardIterator shards(ClusterState clusterState, UpdateRequest request) {
-        if (request.shardId() != -1) {
-            return clusterState.routingTable().index(request.concreteIndex()).shard(request.shardId()).primaryShardIt();
+        if (request.getShardId() != null) {
+            return clusterState.routingTable().index(request.concreteIndex()).shard(request.getShardId().getId()).primaryShardIt();
         }
         ShardIterator shardIterator = clusterService.operationRouting()
                 .indexShards(clusterState, request.concreteIndex(), request.type(), request.id(), request.routing());
@@ -167,8 +168,9 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
     }
 
     protected void shardOperation(final UpdateRequest request, final ActionListener<UpdateResponse> listener, final int retryCount) {
-        final IndexService indexService = indicesService.indexServiceSafe(request.concreteIndex());
-        final IndexShard indexShard = indexService.getShard(request.shardId());
+        final ShardId shardId = request.getShardId();
+        final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
+        final IndexShard indexShard = indexService.getShard(shardId.getId());
         final UpdateHelper.Result result = updateHelper.prepare(request, indexShard);
         switch (result.operation()) {
             case UPSERT:
@@ -194,7 +196,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
                 if (e instanceof VersionConflictEngineException) {
                     if (retryCount < request.retryOnConflict()) {
                         logger.trace("Retry attempt [{}] of [{}] on version conflict on [{}][{}][{}]",
-                                retryCount + 1, request.retryOnConflict(), request.index(), request.shardId(), request.id());
+                                retryCount + 1, request.retryOnConflict(), request.index(), request.getShardId(), request.id());
                         threadPool.executor(executor()).execute(new ActionRunnable<UpdateResponse>(listener) {
                             @Override
                             protected void doRun() {
@@ -267,9 +269,9 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
                 break;
             case NONE:
                 UpdateResponse update = result.action();
-                IndexService indexServiceOrNull = indicesService.indexService(request.concreteIndex());
+                IndexService indexServiceOrNull = indicesService.indexService(shardId.getIndex());
                 if (indexServiceOrNull != null) {
-                    IndexShard shard = indexService.getShardOrNull(request.shardId());
+                    IndexShard shard = indexService.getShardOrNull(shardId.getId());
                     if (shard != null) {
                         shard.noopUpdate(request.type());
                     }

==== UpdateRequest.java ====

@@ -36,6 +36,7 @@ import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.VersionType;
+import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.script.Script;
 import org.elasticsearch.script.ScriptParameterParser;
 import org.elasticsearch.script.ScriptParameterParser.ScriptParameterValue;
@@ -88,7 +89,7 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
     }
 
     public UpdateRequest(String index, String type, String id) {
-        this.index = index;
+        super(index);
         this.type = type;
         this.id = id;
     }
@@ -195,7 +196,7 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
         return parent;
     }
 
-    int shardId() {
+    public ShardId getShardId() {
         return this.shardId;
     }

==== ClusterChangedEvent.java ====

@@ -23,6 +23,7 @@ import com.carrotsearch.hppc.cursors.ObjectCursor;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.index.Index;
 
 import java.util.ArrayList;
 import java.util.Collections;
@@ -120,7 +121,7 @@ public class ClusterChangedEvent {
     /**
      * Returns the indices deleted in this event
      */
-    public List<String> indicesDeleted() {
+    public List<Index> indicesDeleted() {
         // If the new cluster state has a new cluster UUID, the likely scenario is that a node was elected
         // master that has had its data directory wiped out, in which case we don't want to delete the indices and lose data;
         // rather we want to import them as dangling indices instead. So we check here if the cluster UUID differs from the previous
@@ -131,17 +132,18 @@ public class ClusterChangedEvent {
         if (metaDataChanged() == false || isNewCluster()) {
             return Collections.emptyList();
         }
-        List<String> deleted = null;
-        for (ObjectCursor<String> cursor : previousState.metaData().indices().keys()) {
-            String index = cursor.value;
-            if (!state.metaData().hasIndex(index)) {
+        List<Index> deleted = null;
+        for (ObjectCursor<IndexMetaData> cursor : previousState.metaData().indices().values()) {
+            IndexMetaData index = cursor.value;
+            IndexMetaData current = state.metaData().index(index.getIndex().getName());
+            if (current == null || index.getIndexUUID().equals(current.getIndexUUID()) == false) {
                 if (deleted == null) {
                     deleted = new ArrayList<>();
                 }
-                deleted.add(index);
+                deleted.add(index.getIndex());
             }
         }
-        return deleted == null ? Collections.<String>emptyList() : deleted;
+        return deleted == null ? Collections.<Index>emptyList() : deleted;
     }
 
     /**
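
The rewritten loop changes what "deleted" means: an index now counts as deleted not only when its
name is gone from the new state, but also when the name survives with a different UUID (a
delete-and-recreate that happened between the two states). A standalone illustration with plain
maps (String name -> String uuid), not Elasticsearch code:

    Map<String, String> previous = new HashMap<>();
    previous.put("logs", "AAA");
    Map<String, String> current = new HashMap<>();
    current.put("logs", "BBB"); // "logs" was deleted and recreated in between
    List<String> deleted = new ArrayList<>();
    for (Map.Entry<String, String> e : previous.entrySet()) {
        String currentUuid = current.get(e.getKey());
        if (currentUuid == null || e.getValue().equals(currentUuid) == false) {
            deleted.add(e.getKey() + "/" + e.getValue()); // a name-based hasIndex() check would miss this
        }
    }
    // deleted == ["logs/AAA"]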

==== ClusterModule.java ====

@@ -136,6 +136,7 @@ public class ClusterModule extends AbstractModule {
         bind(AllocationService.class).asEagerSingleton();
         bind(DiscoveryNodeService.class).asEagerSingleton();
         bind(ClusterService.class).to(InternalClusterService.class).asEagerSingleton();
+        bind(NodeConnectionsService.class).asEagerSingleton();
         bind(OperationRouting.class).asEagerSingleton();
         bind(MetaDataCreateIndexService.class).asEagerSingleton();
         bind(MetaDataDeleteIndexService.class).asEagerSingleton();

==== ClusterService.java ====

@@ -26,7 +26,6 @@ import org.elasticsearch.cluster.service.PendingClusterTask;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.component.LifecycleComponent;
 import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.tasks.TaskManager;
 
 import java.util.List;
@@ -154,9 +153,4 @@ public interface ClusterService extends LifecycleComponent<ClusterService> {
      * @return A zero time value if the queue is empty, otherwise the time value oldest task waiting in the queue
      */
     TimeValue getMaxTaskWaitTime();
-
-    /**
-     * Returns task manager created in the cluster service
-     */
-    TaskManager getTaskManager();
 }

==== NodeConnectionsService.java (new file) ====

@@ -0,0 +1,156 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster;
+
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lease.Releasable;
+import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.AbstractRunnable;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.common.util.concurrent.FutureUtils;
+import org.elasticsearch.common.util.concurrent.KeyedLock;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ScheduledFuture;
+
+/**
+ * This component is responsible for connecting to nodes once they are added to the cluster state, and disconnect when they are
+ * removed. Also, it periodically checks that all connections are still open and if needed restores them.
+ * Note that this component is *not* responsible for removing nodes from the cluster if they disconnect / do not respond
+ * to pings. This is done by {@link org.elasticsearch.discovery.zen.fd.NodesFaultDetection}. Master fault detection
+ * is done by {@link org.elasticsearch.discovery.zen.fd.MasterFaultDetection}.
+ */
+public class NodeConnectionsService extends AbstractLifecycleComponent<NodeConnectionsService> {
+
+    public static final Setting<TimeValue> CLUSTER_NODE_RECONNECT_INTERVAL_SETTING =
+        Setting.positiveTimeSetting("cluster.nodes.reconnect_interval", TimeValue.timeValueSeconds(10), false, Setting.Scope.CLUSTER);
+
+    private final ThreadPool threadPool;
+    private final TransportService transportService;
+
+    // map between current node and the number of failed connection attempts. 0 means successfully connected.
+    // if a node doesn't appear in this list it shouldn't be monitored
+    private ConcurrentMap<DiscoveryNode, Integer> nodes = ConcurrentCollections.newConcurrentMap();
+
+    final private KeyedLock<DiscoveryNode> nodeLocks = new KeyedLock<>();
+
+    private final TimeValue reconnectInterval;
+
+    private volatile ScheduledFuture<?> backgroundFuture = null;
+
+    @Inject
+    public NodeConnectionsService(Settings settings, ThreadPool threadPool, TransportService transportService) {
+        super(settings);
+        this.threadPool = threadPool;
+        this.transportService = transportService;
+        this.reconnectInterval = NodeConnectionsService.CLUSTER_NODE_RECONNECT_INTERVAL_SETTING.get(settings);
+    }
+
+    public void connectToAddedNodes(ClusterChangedEvent event) {
+        // TODO: do this in parallel (and wait)
+        for (final DiscoveryNode node : event.nodesDelta().addedNodes()) {
+            try (Releasable ignored = nodeLocks.acquire(node)) {
+                Integer current = nodes.put(node, 0);
+                assert current == null : "node " + node + " was added in event but already in internal nodes";
+                validateNodeConnected(node);
+            }
+        }
+    }
+
+    public void disconnectFromRemovedNodes(ClusterChangedEvent event) {
+        for (final DiscoveryNode node : event.nodesDelta().removedNodes()) {
+            try (Releasable ignored = nodeLocks.acquire(node)) {
+                Integer current = nodes.remove(node);
+                assert current != null : "node " + node + " was removed in event but not in internal nodes";
+                try {
+                    transportService.disconnectFromNode(node);
+                } catch (Throwable e) {
+                    logger.warn("failed to disconnect to node [" + node + "]", e);
+                }
+            }
+        }
+    }
+
+    void validateNodeConnected(DiscoveryNode node) {
+        assert nodeLocks.isHeldByCurrentThread(node) : "validateNodeConnected must be called under lock";
+        if (lifecycle.stoppedOrClosed() ||
+            nodes.containsKey(node) == false) { // we double check existence of node since connectToNode might take time...
+            // nothing to do
+        } else {
+            try {
+                // connecting to an already connected node is a noop
+                transportService.connectToNode(node);
+                nodes.put(node, 0);
+            } catch (Exception e) {
+                Integer nodeFailureCount = nodes.get(node);
+                assert nodeFailureCount != null : node + " didn't have a counter in nodes map";
+                nodeFailureCount = nodeFailureCount + 1;
+                // log every 6th failure
+                if ((nodeFailureCount % 6) == 1) {
+                    logger.warn("failed to connect to node {} (tried [{}] times)", e, node, nodeFailureCount);
+                }
+                nodes.put(node, nodeFailureCount);
+            }
+        }
+    }
+
+    class ConnectionChecker extends AbstractRunnable {
+
+        @Override
+        public void onFailure(Throwable t) {
+            logger.warn("unexpected error while checking for node reconnects", t);
+        }
+
+        protected void doRun() {
+            for (DiscoveryNode node : nodes.keySet()) {
+                try (Releasable ignored = nodeLocks.acquire(node)) {
+                    validateNodeConnected(node);
+                }
+            }
+        }
+
+        @Override
+        public void onAfter() {
+            if (lifecycle.started()) {
+                backgroundFuture = threadPool.schedule(reconnectInterval, ThreadPool.Names.GENERIC, this);
+            }
+        }
+    }
+
+    @Override
+    protected void doStart() {
+        backgroundFuture = threadPool.schedule(reconnectInterval, ThreadPool.Names.GENERIC, new ConnectionChecker());
+    }
+
+    @Override
+    protected void doStop() {
+        FutureUtils.cancel(backgroundFuture);
+    }
+
+    @Override
+    protected void doClose() {
+    }
+}
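
Two operational details of the new service are easy to miss: the check interval comes from the
new cluster.nodes.reconnect_interval setting (10s by default), and connection failures are only
logged on the 1st, 7th, 13th... attempt because of the (nodeFailureCount % 6) == 1 guard. A
standalone trace of that guard:

    for (int failureCount = 1; failureCount <= 15; failureCount++) {
        if ((failureCount % 6) == 1) {
            System.out.println("attempt " + failureCount + ": warning logged"); // prints 1, 7, 13
        }
    }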

==== IndexNameExpressionResolver.java ====

@@ -686,7 +686,7 @@ public class IndexNameExpressionResolver extends AbstractComponent {
     }
 
     private boolean isEmptyOrTrivialWildcard(List<String> expressions) {
-        return expressions.isEmpty() || (expressions.size() == 1 && (MetaData.ALL.equals(expressions.get(0))) || Regex.isMatchAllPattern(expressions.get(0)));
+        return expressions.isEmpty() || (expressions.size() == 1 && (MetaData.ALL.equals(expressions.get(0)) || Regex.isMatchAllPattern(expressions.get(0))));
     }
 
     private List<String> resolveEmptyOrTrivialWildcard(IndicesOptions options, MetaData metaData, boolean assertEmpty) {

==== MetaDataCreateIndexService.java ====

@@ -53,6 +53,7 @@ import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.NodeServicesProvider;
 import org.elasticsearch.index.mapper.DocumentMapper;
@@ -188,7 +189,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
             @Override
             public ClusterState execute(ClusterState currentState) throws Exception {
-                boolean indexCreated = false;
+                Index createdIndex = null;
                 String removalReason = null;
                 try {
                     validate(request, currentState);
@@ -308,10 +309,9 @@ public class MetaDataCreateIndexService extends AbstractComponent {
                     // Set up everything, now locally create the index to see that things are ok, and apply
                     final IndexMetaData tmpImd = IndexMetaData.builder(request.index()).settings(actualIndexSettings).build();
                     // create the index here (on the master) to validate it can be created, as well as adding the mapping
-                    indicesService.createIndex(nodeServicesProvider, tmpImd, Collections.emptyList());
-                    indexCreated = true;
+                    final IndexService indexService = indicesService.createIndex(nodeServicesProvider, tmpImd, Collections.emptyList());
+                    createdIndex = indexService.index();
                    // now add the mappings
-                    IndexService indexService = indicesService.indexServiceSafe(request.index());
                     MapperService mapperService = indexService.mapperService();
                     // first, add the default mapping
                     if (mappings.containsKey(MapperService.DEFAULT_MAPPING)) {
@@ -415,9 +415,9 @@ public class MetaDataCreateIndexService extends AbstractComponent {
                     removalReason = "cleaning up after validating index on master";
                     return updatedState;
                 } finally {
-                    if (indexCreated) {
+                    if (createdIndex != null) {
                         // Index was already partially created - need to clean up
-                        indicesService.removeIndex(request.index(), removalReason != null ? removalReason : "failed to create index");
+                        indicesService.removeIndex(createdIndex, removalReason != null ? removalReason : "failed to create index");
                     }
                 }
             }

==== MetaDataIndexAliasesService.java ====

@@ -31,6 +31,7 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexNotFoundException;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.NodeServicesProvider;
@@ -74,7 +75,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
             @Override
             public ClusterState execute(final ClusterState currentState) {
-                List<String> indicesToClose = new ArrayList<>();
+                List<Index> indicesToClose = new ArrayList<>();
                 Map<String, IndexService> indices = new HashMap<>();
                 try {
                     for (AliasAction aliasAction : request.actions()) {
@@ -112,7 +113,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
                                 logger.warn("[{}] failed to temporary create in order to apply alias action", e, indexMetaData.getIndex());
                                 continue;
                             }
-                            indicesToClose.add(indexMetaData.getIndex().getName());
+                            indicesToClose.add(indexMetaData.getIndex());
                         }
                         indices.put(indexMetaData.getIndex().getName(), indexService);
                     }
@@ -153,7 +154,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
                     }
                     return currentState;
                 } finally {
-                    for (String index : indicesToClose) {
+                    for (Index index : indicesToClose) {
                         indicesService.removeIndex(index, "created for alias processing");
                     }
                 }

==== MetaDataMappingService.java ====

@@ -36,6 +36,7 @@ import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexNotFoundException;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.NodeServicesProvider;
@@ -112,13 +113,13 @@ public class MetaDataMappingService extends AbstractComponent {
         MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
         for (Map.Entry<String, List<RefreshTask>> entry : tasksPerIndex.entrySet()) {
-            String index = entry.getKey();
-            IndexMetaData indexMetaData = mdBuilder.get(index);
+            IndexMetaData indexMetaData = mdBuilder.get(entry.getKey());
             if (indexMetaData == null) {
                 // index got deleted on us, ignore...
-                logger.debug("[{}] ignoring tasks - index meta data doesn't exist", index);
+                logger.debug("[{}] ignoring tasks - index meta data doesn't exist", entry.getKey());
                 continue;
             }
+            final Index index = indexMetaData.getIndex();
             // the tasks lists to iterate over, filled with the list of mapping tasks, trying to keep
             // the latest (based on order) update mapping one per node
             List<RefreshTask> allIndexTasks = entry.getValue();
@@ -127,7 +128,7 @@ public class MetaDataMappingService extends AbstractComponent {
                 if (indexMetaData.isSameUUID(task.indexUUID)) {
                     hasTaskWithRightUUID = true;
                 } else {
-                    logger.debug("[{}] ignoring task [{}] - index meta data doesn't match task uuid", index, task);
+                    logger.debug("{} ignoring task [{}] - index meta data doesn't match task uuid", index, task);
                 }
             }
             if (hasTaskWithRightUUID == false) {
@@ -136,7 +137,7 @@ public class MetaDataMappingService extends AbstractComponent {
             // construct the actual index if needed, and make sure the relevant mappings are there
             boolean removeIndex = false;
-            IndexService indexService = indicesService.indexService(index);
+            IndexService indexService = indicesService.indexService(indexMetaData.getIndex());
             if (indexService == null) {
                 // we need to create the index here, and add the current mapping to it, so we can merge
                 indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList());
@@ -208,47 +209,57 @@ public class MetaDataMappingService extends AbstractComponent {
     class PutMappingExecutor implements ClusterStateTaskExecutor<PutMappingClusterStateUpdateRequest> {
         @Override
-        public BatchResult<PutMappingClusterStateUpdateRequest> execute(ClusterState currentState, List<PutMappingClusterStateUpdateRequest> tasks) throws Exception {
-            Set<String> indicesToClose = new HashSet<>();
+        public BatchResult<PutMappingClusterStateUpdateRequest> execute(ClusterState currentState,
+                                                                        List<PutMappingClusterStateUpdateRequest> tasks) throws Exception {
+            Set<Index> indicesToClose = new HashSet<>();
             BatchResult.Builder<PutMappingClusterStateUpdateRequest> builder = BatchResult.builder();
             try {
                 // precreate incoming indices;
                 for (PutMappingClusterStateUpdateRequest request : tasks) {
-                    // failures here mean something is broken with our cluster state - fail all tasks by letting exceptions bubble up
-                    for (String index : request.indices()) {
-                        final IndexMetaData indexMetaData = currentState.metaData().index(index);
-                        if (indexMetaData != null && indicesService.hasIndex(index) == false) {
-                            // if we don't have the index, we will throw exceptions later;
-                            indicesToClose.add(index);
-                            IndexService indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList());
-                            // add mappings for all types, we need them for cross-type validation
-                            for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
-                                indexService.mapperService().merge(mapping.value.type(), mapping.value.source(), MapperService.MergeReason.MAPPING_RECOVERY, request.updateAllTypes());
+                    final List<Index> indices = new ArrayList<>(request.indices().length);
+                    try {
+                        for (String index : request.indices()) {
+                            final IndexMetaData indexMetaData = currentState.metaData().index(index);
+                            if (indexMetaData != null) {
+                                if (indicesService.hasIndex(indexMetaData.getIndex()) == false) {
+                                    // if the index does not exists we create it once, add all types to the mapper service and
+                                    // close it later once we are done with mapping update
+                                    indicesToClose.add(indexMetaData.getIndex());
+                                    IndexService indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData,
+                                        Collections.emptyList());
+                                    // add mappings for all types, we need them for cross-type validation
+                                    for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
+                                        indexService.mapperService().merge(mapping.value.type(), mapping.value.source(),
+                                            MapperService.MergeReason.MAPPING_RECOVERY, request.updateAllTypes());
+                                    }
+                                }
+                                indices.add(indexMetaData.getIndex());
+                            } else {
+                                // we didn't find the index in the clusterstate - maybe it was deleted
+                                // NOTE: this doesn't fail the entire batch only the current PutMapping request we are processing
+                                throw new IndexNotFoundException(index);
                             }
                         }
-                    }
-                }
-                for (PutMappingClusterStateUpdateRequest request : tasks) {
-                    try {
-                        currentState = applyRequest(currentState, request);
+                        currentState = applyRequest(currentState, request, indices);
                         builder.success(request);
                     } catch (Throwable t) {
                         builder.failure(request, t);
                     }
                 }
                 return builder.build(currentState);
             } finally {
-                for (String index : indicesToClose) {
+                for (Index index : indicesToClose) {
                     indicesService.removeIndex(index, "created for mapping processing");
                 }
             }
         }
 
-        private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request) throws IOException {
+        private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request,
+                                          List<Index> indices) throws IOException {
             String mappingType = request.type();
             CompressedXContent mappingUpdateSource = new CompressedXContent(request.source());
-            for (String index : request.indices()) {
+            final MetaData metaData = currentState.metaData();
+            for (Index index : indices) {
                 IndexService indexService = indicesService.indexServiceSafe(index);
                 // try and parse it (no need to add it here) so we can bail early in case of parsing exception
                 DocumentMapper newMapper;
@@ -270,7 +281,7 @@ public class MetaDataMappingService extends AbstractComponent {
                 // and a put mapping api call, so we don't which type did exist before.
                 // Also the order of the mappings may be backwards.
                 if (newMapper.parentFieldMapper().active()) {
-                    IndexMetaData indexMetaData = currentState.metaData().index(index);
+                    IndexMetaData indexMetaData = metaData.index(index);
                     for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
                         if (newMapper.parentFieldMapper().type().equals(mapping.value.type())) {
                             throw new IllegalArgumentException("can't add a _parent field that points to an already existing type");
@@ -290,11 +301,11 @@ public class MetaDataMappingService extends AbstractComponent {
             if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && !PercolatorService.TYPE_NAME.equals(mappingType) && mappingType.charAt(0) == '_') {
                 throw new InvalidTypeNameException("Document mapping type name can't start with '_'");
             }
-            MetaData.Builder builder = MetaData.builder(currentState.metaData());
-            for (String index : request.indices()) {
+            MetaData.Builder builder = MetaData.builder(metaData);
+            for (Index index : indices) {
                 // do the actual merge here on the master, and update the mapping source
                 IndexService indexService = indicesService.indexService(index);
-                if (indexService == null) {
+                if (indexService == null) { // TODO this seems impossible given we use indexServiceSafe above
                     continue;
                 }
@@ -326,7 +337,7 @@ public class MetaDataMappingService extends AbstractComponent {
                 }
             }
 
-            IndexMetaData indexMetaData = currentState.metaData().index(index);
+            IndexMetaData indexMetaData = metaData.index(index);
             if (indexMetaData == null) {
                 throw new IndexNotFoundException(index);
             }

==== DiscoveryNodeService.java ====

@@ -19,24 +19,40 @@
 
 package org.elasticsearch.cluster.node;
 
+import org.elasticsearch.Version;
+import org.elasticsearch.common.Randomness;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.TransportAddress;
 
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Random;
 import java.util.concurrent.CopyOnWriteArrayList;
 
 /**
  */
 public class DiscoveryNodeService extends AbstractComponent {
 
+    public static final Setting<Long> NODE_ID_SEED_SETTING =
+        // don't use node.id.seed so it won't be seen as an attribute
+        Setting.longSetting("node_id.seed", 0L, Long.MIN_VALUE, false, Setting.Scope.CLUSTER);
+
     private final List<CustomAttributesProvider> customAttributesProviders = new CopyOnWriteArrayList<>();
+    private final Version version;
 
     @Inject
-    public DiscoveryNodeService(Settings settings) {
+    public DiscoveryNodeService(Settings settings, Version version) {
         super(settings);
+        this.version = version;
+    }
+
+    public static String generateNodeId(Settings settings) {
+        Random random = Randomness.get(settings, NODE_ID_SEED_SETTING);
+        return Strings.randomBase64UUID(random);
     }
 
     public DiscoveryNodeService addCustomAttributeProvider(CustomAttributesProvider customAttributesProvider) {
@@ -44,7 +60,7 @@ public class DiscoveryNodeService extends AbstractComponent {
         return this;
     }
 
-    public Map<String, String> buildAttributes() {
+    public DiscoveryNode buildLocalNode(TransportAddress publishAddress) {
         Map<String, String> attributes = new HashMap<>(settings.getByPrefix("node.").getAsMap());
         attributes.remove("name"); // name is extracted in other places
         if (attributes.containsKey("client")) {
@@ -76,10 +92,11 @@ public class DiscoveryNodeService extends AbstractComponent {
             }
         }
 
-        return attributes;
+        final String nodeId = generateNodeId(settings);
+        return new DiscoveryNode(settings.get("node.name"), nodeId, publishAddress, attributes, version);
     }
 
-    public static interface CustomAttributesProvider {
+    public interface CustomAttributesProvider {
         Map<String, String> buildAttributes();
     }

View File

@ -19,7 +19,6 @@
package org.elasticsearch.cluster.service; package org.elasticsearch.cluster.service;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.AckedClusterStateTaskListener; import org.elasticsearch.cluster.AckedClusterStateTaskListener;
import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterName;
@ -32,19 +31,18 @@ import org.elasticsearch.cluster.ClusterStateTaskExecutor;
import org.elasticsearch.cluster.ClusterStateTaskListener; import org.elasticsearch.cluster.ClusterStateTaskListener;
import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.LocalNodeMasterListener; import org.elasticsearch.cluster.LocalNodeMasterListener;
import org.elasticsearch.cluster.NodeConnectionsService;
import org.elasticsearch.cluster.TimeoutClusterStateListener; import org.elasticsearch.cluster.TimeoutClusterStateListener;
import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException; import org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException;
import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodeService;
import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.OperationRouting; import org.elasticsearch.cluster.routing.OperationRouting;
import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Priority; import org.elasticsearch.common.Priority;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Inject;
@ -54,7 +52,6 @@ import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.text.Text; import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.common.util.concurrent.CountDown;
@ -65,9 +62,7 @@ import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor;
import org.elasticsearch.common.util.concurrent.PrioritizedRunnable; import org.elasticsearch.common.util.concurrent.PrioritizedRunnable;
import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.common.util.iterable.Iterables;
import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collection; import java.util.Collection;
@ -78,8 +73,6 @@ import java.util.Locale;
import java.util.Map; import java.util.Map;
import java.util.Objects; import java.util.Objects;
import java.util.Queue; import java.util.Queue;
import java.util.Random;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.Executor; import java.util.concurrent.Executor;
import java.util.concurrent.Future; import java.util.concurrent.Future;
@ -97,25 +90,15 @@ import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadF
public class InternalClusterService extends AbstractLifecycleComponent<ClusterService> implements ClusterService { public class InternalClusterService extends AbstractLifecycleComponent<ClusterService> implements ClusterService {
public static final Setting<TimeValue> CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING = Setting.positiveTimeSetting("cluster.service.slow_task_logging_threshold", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER); public static final Setting<TimeValue> CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING = Setting.positiveTimeSetting("cluster.service.slow_task_logging_threshold", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER);
public static final Setting<TimeValue> CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING = Setting.positiveTimeSetting("cluster.service.reconnect_interval", TimeValue.timeValueSeconds(10), false, Setting.Scope.CLUSTER);
public static final String UPDATE_THREAD_NAME = "clusterService#updateTask"; public static final String UPDATE_THREAD_NAME = "clusterService#updateTask";
public static final Setting<Long> NODE_ID_SEED_SETTING =
// don't use node.id.seed so it won't be seen as an attribute
Setting.longSetting("node_id.seed", 0L, Long.MIN_VALUE, false, Setting.Scope.CLUSTER);
private final ThreadPool threadPool; private final ThreadPool threadPool;
private BiConsumer<ClusterChangedEvent, Discovery.AckListener> clusterStatePublisher; private BiConsumer<ClusterChangedEvent, Discovery.AckListener> clusterStatePublisher;
private final OperationRouting operationRouting; private final OperationRouting operationRouting;
private final TransportService transportService;
private final ClusterSettings clusterSettings; private final ClusterSettings clusterSettings;
private final DiscoveryNodeService discoveryNodeService;
private final Version version;
private final TimeValue reconnectInterval;
private TimeValue slowTaskLoggingThreshold; private TimeValue slowTaskLoggingThreshold;
@ -140,47 +123,49 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
private final ClusterBlocks.Builder initialBlocks; private final ClusterBlocks.Builder initialBlocks;
private final TaskManager taskManager; private NodeConnectionsService nodeConnectionsService;
private volatile ScheduledFuture reconnectToNodes;
@Inject @Inject
public InternalClusterService(Settings settings, OperationRouting operationRouting, TransportService transportService, public InternalClusterService(Settings settings, OperationRouting operationRouting,
ClusterSettings clusterSettings, ThreadPool threadPool, ClusterName clusterName, DiscoveryNodeService discoveryNodeService, Version version) { ClusterSettings clusterSettings, ThreadPool threadPool, ClusterName clusterName) {
super(settings); super(settings);
this.operationRouting = operationRouting; this.operationRouting = operationRouting;
this.transportService = transportService;
this.threadPool = threadPool; this.threadPool = threadPool;
this.clusterSettings = clusterSettings; this.clusterSettings = clusterSettings;
this.discoveryNodeService = discoveryNodeService;
this.version = version;
// will be replaced on doStart. // will be replaced on doStart.
this.clusterState = ClusterState.builder(clusterName).build(); this.clusterState = ClusterState.builder(clusterName).build();
this.clusterSettings.addSettingsUpdateConsumer(CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, this::setSlowTaskLoggingThreshold); this.clusterSettings.addSettingsUpdateConsumer(CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, this::setSlowTaskLoggingThreshold);
this.reconnectInterval = CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING.get(settings);
this.slowTaskLoggingThreshold = CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings); this.slowTaskLoggingThreshold = CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings);
localNodeMasterListeners = new LocalNodeMasterListeners(threadPool); localNodeMasterListeners = new LocalNodeMasterListeners(threadPool);
initialBlocks = ClusterBlocks.builder(); initialBlocks = ClusterBlocks.builder();
taskManager = transportService.getTaskManager();
} }
private void setSlowTaskLoggingThreshold(TimeValue slowTaskLoggingThreshold) { private void setSlowTaskLoggingThreshold(TimeValue slowTaskLoggingThreshold) {
this.slowTaskLoggingThreshold = slowTaskLoggingThreshold; this.slowTaskLoggingThreshold = slowTaskLoggingThreshold;
} }
public void setClusterStatePublisher(BiConsumer<ClusterChangedEvent, Discovery.AckListener> publisher) { synchronized public void setClusterStatePublisher(BiConsumer<ClusterChangedEvent, Discovery.AckListener> publisher) {
clusterStatePublisher = publisher; clusterStatePublisher = publisher;
} }
synchronized public void setLocalNode(DiscoveryNode localNode) {
assert clusterState.nodes().localNodeId() == null : "local node is already set";
DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()).put(localNode).localNodeId(localNode.id());
this.clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).build();
}
synchronized public void setNodeConnectionsService(NodeConnectionsService nodeConnectionsService) {
assert this.nodeConnectionsService == null : "nodeConnectionsService is already set";
this.nodeConnectionsService = nodeConnectionsService;
}
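These two setters replace constructor injection, so the local node and the connection service must be wired in before the service starts. A hedged sketch of the expected call order (all names appear in this diff; the actual call site in Node is not shown here):

    DiscoveryNode localNode = discoveryNodeService.buildLocalNode(transportService.boundAddress().publishAddress());
    clusterService.setLocalNode(localNode);
    clusterService.setNodeConnectionsService(nodeConnectionsService);
    clusterService.start(); // doStart() below fails fast if either setter was skipped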
@Override @Override
public void addInitialStateBlock(ClusterBlock block) throws IllegalStateException { synchronized public void addInitialStateBlock(ClusterBlock block) throws IllegalStateException {
if (lifecycle.started()) { if (lifecycle.started()) {
throw new IllegalStateException("can't set initial block when started"); throw new IllegalStateException("can't set initial block when started");
} }
@ -188,12 +173,12 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
} }
@Override @Override
public void removeInitialStateBlock(ClusterBlock block) throws IllegalStateException { synchronized public void removeInitialStateBlock(ClusterBlock block) throws IllegalStateException {
removeInitialStateBlock(block.id()); removeInitialStateBlock(block.id());
} }
@Override @Override
public void removeInitialStateBlock(int blockId) throws IllegalStateException { synchronized public void removeInitialStateBlock(int blockId) throws IllegalStateException {
if (lifecycle.started()) { if (lifecycle.started()) {
throw new IllegalStateException("can't set initial block when started"); throw new IllegalStateException("can't set initial block when started");
} }
@ -201,26 +186,18 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
} }
@Override @Override
protected void doStart() { synchronized protected void doStart() {
Objects.requireNonNull(clusterStatePublisher, "please set a cluster state publisher before starting"); Objects.requireNonNull(clusterStatePublisher, "please set a cluster state publisher before starting");
Objects.requireNonNull(clusterState.nodes().localNode(), "please set the local node before starting");
Objects.requireNonNull(nodeConnectionsService, "please set the node connection service before starting");
add(localNodeMasterListeners); add(localNodeMasterListeners);
add(taskManager);
this.clusterState = ClusterState.builder(clusterState).blocks(initialBlocks).build(); this.clusterState = ClusterState.builder(clusterState).blocks(initialBlocks).build();
this.updateTasksExecutor = EsExecutors.newSinglePrioritizing(UPDATE_THREAD_NAME, daemonThreadFactory(settings, UPDATE_THREAD_NAME), threadPool.getThreadContext()); this.updateTasksExecutor = EsExecutors.newSinglePrioritizing(UPDATE_THREAD_NAME, daemonThreadFactory(settings, UPDATE_THREAD_NAME), threadPool.getThreadContext());
this.reconnectToNodes = threadPool.schedule(reconnectInterval, ThreadPool.Names.GENERIC, new ReconnectToNodes()); this.clusterState = ClusterState.builder(clusterState).blocks(initialBlocks).build();
Map<String, String> nodeAttributes = discoveryNodeService.buildAttributes();
// note, we rely on the fact that its a new id each time we start, see FD and "kill -9" handling
final String nodeId = generateNodeId(settings);
final TransportAddress publishAddress = transportService.boundAddress().publishAddress();
DiscoveryNode localNode = new DiscoveryNode(settings.get("node.name"), nodeId, publishAddress, nodeAttributes, version);
DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder().put(localNode).localNodeId(localNode.id());
this.clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).blocks(initialBlocks).build();
this.transportService.setLocalNode(localNode);
} }
@Override @Override
protected void doStop() { synchronized protected void doStop() {
FutureUtils.cancel(this.reconnectToNodes);
for (NotifyTimeout onGoingTimeout : onGoingTimeouts) { for (NotifyTimeout onGoingTimeout : onGoingTimeouts) {
onGoingTimeout.cancel(); onGoingTimeout.cancel();
onGoingTimeout.listener.onClose(); onGoingTimeout.listener.onClose();
@ -230,7 +207,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
} }
@Override @Override
protected void doClose() { synchronized protected void doClose() {
} }
@Override @Override
@ -400,11 +377,6 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
return updateTasksExecutor.getMaxTaskWaitTime(); return updateTasksExecutor.getMaxTaskWaitTime();
} }
@Override
public TaskManager getTaskManager() {
return taskManager;
}
/** asserts that the current thread is the cluster state update thread */ /** asserts that the current thread is the cluster state update thread */
public boolean assertClusterStateThread() { public boolean assertClusterStateThread() {
assert Thread.currentThread().getName().contains(InternalClusterService.UPDATE_THREAD_NAME) : "not called from the cluster state update thread"; assert Thread.currentThread().getName().contains(InternalClusterService.UPDATE_THREAD_NAME) : "not called from the cluster state update thread";
@ -457,14 +429,14 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
return; return;
} }
ClusterStateTaskExecutor.BatchResult<T> batchResult; ClusterStateTaskExecutor.BatchResult<T> batchResult;
long startTimeNS = System.nanoTime(); long startTimeNS = currentTimeInNanos();
try { try {
List<T> inputs = toExecute.stream().map(tUpdateTask -> tUpdateTask.task).collect(Collectors.toList()); List<T> inputs = toExecute.stream().map(tUpdateTask -> tUpdateTask.task).collect(Collectors.toList());
batchResult = executor.execute(previousClusterState, inputs); batchResult = executor.execute(previousClusterState, inputs);
} catch (Throwable e) { } catch (Throwable e) {
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS))); TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
if (logger.isTraceEnabled()) { if (logger.isTraceEnabled()) {
StringBuilder sb = new StringBuilder("failed to execute cluster state update in ").append(executionTime).append(", state:\nversion [").append(previousClusterState.version()).append("], source [").append(source).append("]\n"); StringBuilder sb = new StringBuilder("failed to execute cluster state update in [").append(executionTime).append("], state:\nversion [").append(previousClusterState.version()).append("], source [").append(source).append("]\n");
sb.append(previousClusterState.nodes().prettyPrint()); sb.append(previousClusterState.nodes().prettyPrint());
sb.append(previousClusterState.routingTable().prettyPrint()); sb.append(previousClusterState.routingTable().prettyPrint());
sb.append(previousClusterState.getRoutingNodes().prettyPrint()); sb.append(previousClusterState.getRoutingNodes().prettyPrint());
@ -509,8 +481,8 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
} }
task.listener.clusterStateProcessed(task.source, previousClusterState, newClusterState); task.listener.clusterStateProcessed(task.source, previousClusterState, newClusterState);
} }
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS))); TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
logger.debug("processing [{}]: took {} no change in cluster_state", source, executionTime); logger.debug("processing [{}]: took [{}] no change in cluster_state", source, executionTime);
warnAboutSlowTaskIfNeeded(executionTime, source); warnAboutSlowTaskIfNeeded(executionTime, source);
return; return;
} }
@ -568,15 +540,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
} }
} }
// TODO, do this in parallel (and wait) nodeConnectionsService.connectToAddedNodes(clusterChangedEvent);
for (DiscoveryNode node : nodesDelta.addedNodes()) {
try {
transportService.connectToNode(node);
} catch (Throwable e) {
// the fault detection will detect it as failed as well
logger.warn("failed to connect to node [" + node + "]", e);
}
}
// if we are the master, publish the new state to all nodes // if we are the master, publish the new state to all nodes
// we publish here before we send a notification to all the listeners, since if it fails // we publish here before we send a notification to all the listeners, since if it fails
@ -612,13 +576,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
} }
} }
for (DiscoveryNode node : nodesDelta.removedNodes()) { nodeConnectionsService.disconnectFromRemovedNodes(clusterChangedEvent);
try {
transportService.disconnectFromNode(node);
} catch (Throwable e) {
logger.warn("failed to disconnect to node [" + node + "]", e);
}
}
newClusterState.status(ClusterState.ClusterStateStatus.APPLIED); newClusterState.status(ClusterState.ClusterStateStatus.APPLIED);
@ -649,11 +607,11 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
logger.error("exception thrown while notifying executor of new cluster state publication [{}]", e, source); logger.error("exception thrown while notifying executor of new cluster state publication [{}]", e, source);
} }
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS))); TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
logger.debug("processing [{}]: took {} done applying updated cluster_state (version: {}, uuid: {})", source, executionTime, newClusterState.version(), newClusterState.stateUUID()); logger.debug("processing [{}]: took [{}] done applying updated cluster_state (version: {}, uuid: {})", source, executionTime, newClusterState.version(), newClusterState.stateUUID());
warnAboutSlowTaskIfNeeded(executionTime, source); warnAboutSlowTaskIfNeeded(executionTime, source);
} catch (Throwable t) { } catch (Throwable t) {
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS))); TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
StringBuilder sb = new StringBuilder("failed to apply updated cluster state in ").append(executionTime).append(":\nversion [").append(newClusterState.version()).append("], uuid [").append(newClusterState.stateUUID()).append("], source [").append(source).append("]\n"); StringBuilder sb = new StringBuilder("failed to apply updated cluster state in ").append(executionTime).append(":\nversion [").append(newClusterState.version()).append("], uuid [").append(newClusterState.stateUUID()).append("], source [").append(source).append("]\n");
sb.append(newClusterState.nodes().prettyPrint()); sb.append(newClusterState.nodes().prettyPrint());
sb.append(newClusterState.routingTable().prettyPrint()); sb.append(newClusterState.routingTable().prettyPrint());
@ -664,6 +622,9 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
} }
// this one is overridden in tests so we can control time
protected long currentTimeInNanos() {return System.nanoTime();}
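A minimal sketch of the test hook this enables (the subclass and fake clock are illustrative assumptions, not code from this change):

    // In a test, override currentTimeInNanos() so measured execution times,
    // and therefore the slow-task warning, become deterministic.
    InternalClusterService service = new InternalClusterService(settings, operationRouting,
            clusterSettings, threadPool, clusterName) {
        private long fakeNanos = 0;
        @Override
        protected long currentTimeInNanos() {
            return fakeNanos += TimeValue.timeValueMillis(100).nanos();
        }
    };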
private static SafeClusterStateTaskListener safe(ClusterStateTaskListener listener, ESLogger logger) { private static SafeClusterStateTaskListener safe(ClusterStateTaskListener listener, ESLogger logger) {
if (listener instanceof AckedClusterStateTaskListener) { if (listener instanceof AckedClusterStateTaskListener) {
return new SafeAckedClusterStateTaskListener((AckedClusterStateTaskListener) listener, logger); return new SafeAckedClusterStateTaskListener((AckedClusterStateTaskListener) listener, logger);
@ -777,7 +738,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
private void warnAboutSlowTaskIfNeeded(TimeValue executionTime, String source) { private void warnAboutSlowTaskIfNeeded(TimeValue executionTime, String source) {
if (executionTime.getMillis() > slowTaskLoggingThreshold.getMillis()) { if (executionTime.getMillis() > slowTaskLoggingThreshold.getMillis()) {
logger.warn("cluster state update task [{}] took {} above the warn threshold of {}", source, executionTime, slowTaskLoggingThreshold); logger.warn("cluster state update task [{}] took [{}] above the warn threshold of {}", source, executionTime, slowTaskLoggingThreshold);
} }
} }
@ -809,64 +770,6 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
} }
} }
private class ReconnectToNodes implements Runnable {
private ConcurrentMap<DiscoveryNode, Integer> failureCount = ConcurrentCollections.newConcurrentMap();
@Override
public void run() {
// master node will check against all nodes if its alive with certain discoveries implementations,
// but we can't rely on that, so we check on it as well
for (DiscoveryNode node : clusterState.nodes()) {
if (lifecycle.stoppedOrClosed()) {
return;
}
if (clusterState.nodes().nodeExists(node.id())) { // we double check existence of node since connectToNode might take time...
if (!transportService.nodeConnected(node)) {
try {
transportService.connectToNode(node);
} catch (Exception e) {
if (lifecycle.stoppedOrClosed()) {
return;
}
if (clusterState.nodes().nodeExists(node.id())) { // double check here as well, maybe its gone?
Integer nodeFailureCount = failureCount.get(node);
if (nodeFailureCount == null) {
nodeFailureCount = 1;
} else {
nodeFailureCount = nodeFailureCount + 1;
}
// log every 6th failure
if ((nodeFailureCount % 6) == 0) {
// reset the failure count...
nodeFailureCount = 0;
logger.warn("failed to reconnect to node {}", e, node);
}
failureCount.put(node, nodeFailureCount);
}
}
}
}
}
// go over and remove failed nodes that have been removed
DiscoveryNodes nodes = clusterState.nodes();
for (Iterator<DiscoveryNode> failedNodesIt = failureCount.keySet().iterator(); failedNodesIt.hasNext(); ) {
DiscoveryNode failedNode = failedNodesIt.next();
if (!nodes.nodeExists(failedNode.id())) {
failedNodesIt.remove();
}
}
if (lifecycle.started()) {
reconnectToNodes = threadPool.schedule(reconnectInterval, ThreadPool.Names.GENERIC, this);
}
}
}
public static String generateNodeId(Settings settings) {
Random random = Randomness.get(settings, NODE_ID_SEED_SETTING);
return Strings.randomBase64UUID(random);
}
private static class LocalNodeMasterListeners implements ClusterStateListener { private static class LocalNodeMasterListeners implements ClusterStateListener {
private final List<LocalNodeMasterListener> listeners = new CopyOnWriteArrayList<>(); private final List<LocalNodeMasterListener> listeners = new CopyOnWriteArrayList<>();

View File

@ -37,6 +37,7 @@ import org.elasticsearch.common.geo.builders.ShapeBuilder;
import org.elasticsearch.common.text.Text; import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder; import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;
import org.elasticsearch.ingest.IngestStats;
import org.elasticsearch.search.rescore.RescoreBuilder; import org.elasticsearch.search.rescore.RescoreBuilder;
import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.Task;
import org.elasticsearch.search.aggregations.AggregatorBuilder; import org.elasticsearch.search.aggregations.AggregatorBuilder;
@ -552,6 +553,14 @@ public abstract class StreamInput extends InputStream {
} }
} }
public <T extends Writeable> T readOptionalWritable(T prototype) throws IOException {
if (readBoolean()) {
return (T) prototype.readFrom(this);
} else {
return null;
}
}
public <T extends Throwable> T readThrowable() throws IOException { public <T extends Throwable> T readThrowable() throws IOException {
if (readBoolean()) { if (readBoolean()) {
int key = readVInt(); int key = readVInt();

View File

@ -520,6 +520,15 @@ public abstract class StreamOutput extends OutputStream {
} }
} }
public void writeOptionalWriteable(@Nullable Writeable writeable) throws IOException {
if (writeable != null) {
writeBoolean(true);
writeable.writeTo(this);
} else {
writeBoolean(false);
}
}
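The reader and writer halves are symmetric: a presence boolean followed by the payload. A hedged round-trip sketch (FooStats is a hypothetical Writeable using the PROTO prototype pattern, not a type introduced here):

    out.writeOptionalWriteable(maybeStats);                    // true + payload, or false
    FooStats stats = in.readOptionalWritable(FooStats.PROTO);  // null when the boolean was false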
public void writeThrowable(Throwable throwable) throws IOException { public void writeThrowable(Throwable throwable) throws IOException {
if (throwable == null) { if (throwable == null) {
writeBoolean(false); writeBoolean(false);

View File

@ -235,11 +235,7 @@ public class Lucene {
@Override @Override
protected Object doBody(String segmentFileName) throws IOException { protected Object doBody(String segmentFileName) throws IOException {
try (IndexInput input = directory.openInput(segmentFileName, IOContext.READ)) { try (IndexInput input = directory.openInput(segmentFileName, IOContext.READ)) {
final int format = input.readInt(); CodecUtil.checksumEntireFile(input);
if (format == CodecUtil.CODEC_MAGIC) {
CodecUtil.checksumEntireFile(input);
}
// legacy....
} }
return null; return null;
} }

View File

@ -29,8 +29,10 @@ import org.elasticsearch.client.transport.TransportClientNodesService;
import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.ClusterModule;
import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.InternalClusterInfoService; import org.elasticsearch.cluster.InternalClusterInfoService;
import org.elasticsearch.cluster.NodeConnectionsService;
import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodeService;
import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
@ -259,7 +261,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
TransportService.TRACE_LOG_INCLUDE_SETTING, TransportService.TRACE_LOG_INCLUDE_SETTING,
TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING, TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING,
ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING, ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING,
InternalClusterService.CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING, NodeConnectionsService.CLUSTER_NODE_RECONNECT_INTERVAL_SETTING,
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING, HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING,
HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING, HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING,
Transport.TRANSPORT_TCP_COMPRESS, Transport.TRANSPORT_TCP_COMPRESS,
@ -326,7 +328,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
Environment.PATH_SCRIPTS_SETTING, Environment.PATH_SCRIPTS_SETTING,
Environment.PATH_SHARED_DATA_SETTING, Environment.PATH_SHARED_DATA_SETTING,
Environment.PIDFILE_SETTING, Environment.PIDFILE_SETTING,
InternalClusterService.NODE_ID_SEED_SETTING, DiscoveryNodeService.NODE_ID_SEED_SETTING,
DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING, DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING,
DiscoveryModule.DISCOVERY_TYPE_SETTING, DiscoveryModule.DISCOVERY_TYPE_SETTING,
DiscoveryModule.ZEN_MASTER_SERVICE_TYPE_SETTING, DiscoveryModule.ZEN_MASTER_SERVICE_TYPE_SETTING,

View File

@ -20,7 +20,10 @@
package org.elasticsearch.common.util.concurrent; package org.elasticsearch.common.util.concurrent;
import org.elasticsearch.common.lease.Releasable;
import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReentrantLock; import java.util.concurrent.locks.ReentrantLock;
@ -29,9 +32,8 @@ import java.util.concurrent.locks.ReentrantLock;
 * created the first time they are acquired and removed if no thread holds the  * created the first time they are acquired and removed if no thread holds the
* lock. The latter is important to assure that the list of locks does not grow * lock. The latter is important to assure that the list of locks does not grow
* infinitely. * infinitely.
* *
* A Thread can acquire a lock only once. *
*
* */ * */
public class KeyedLock<T> { public class KeyedLock<T> {
@ -50,48 +52,38 @@ public class KeyedLock<T> {
private final ConcurrentMap<T, KeyLock> map = ConcurrentCollections.newConcurrentMap(); private final ConcurrentMap<T, KeyLock> map = ConcurrentCollections.newConcurrentMap();
protected final ThreadLocal<KeyLock> threadLocal = new ThreadLocal<>(); public Releasable acquire(T key) {
 assert isHeldByCurrentThread(key) == false : "lock for " + key + " is already held by this thread";
public void acquire(T key) {
while (true) { while (true) {
if (threadLocal.get() != null) {
// if we are here, the thread already has the lock
throw new IllegalStateException("Lock already acquired in Thread" + Thread.currentThread().getId()
+ " for key " + key);
}
KeyLock perNodeLock = map.get(key); KeyLock perNodeLock = map.get(key);
if (perNodeLock == null) { if (perNodeLock == null) {
KeyLock newLock = new KeyLock(fair); KeyLock newLock = new KeyLock(fair);
perNodeLock = map.putIfAbsent(key, newLock); perNodeLock = map.putIfAbsent(key, newLock);
if (perNodeLock == null) { if (perNodeLock == null) {
newLock.lock(); newLock.lock();
threadLocal.set(newLock); return new ReleasableLock(key, newLock);
return;
} }
} }
assert perNodeLock != null; assert perNodeLock != null;
int i = perNodeLock.count.get(); int i = perNodeLock.count.get();
if (i > 0 && perNodeLock.count.compareAndSet(i, i + 1)) { if (i > 0 && perNodeLock.count.compareAndSet(i, i + 1)) {
perNodeLock.lock(); perNodeLock.lock();
threadLocal.set(perNodeLock); return new ReleasableLock(key, perNodeLock);
return;
} }
} }
} }
public void release(T key) { public boolean isHeldByCurrentThread(T key) {
KeyLock lock = threadLocal.get(); KeyLock lock = map.get(key);
if (lock == null) { if (lock == null) {
throw new IllegalStateException("Lock not acquired"); return false;
} }
release(key, lock); return lock.isHeldByCurrentThread();
} }
void release(T key, KeyLock lock) { void release(T key, KeyLock lock) {
assert lock.isHeldByCurrentThread();
assert lock == map.get(key); assert lock == map.get(key);
lock.unlock(); lock.unlock();
threadLocal.set(null);
int decrementAndGet = lock.count.decrementAndGet(); int decrementAndGet = lock.count.decrementAndGet();
if (decrementAndGet == 0) { if (decrementAndGet == 0) {
map.remove(key, lock); map.remove(key, lock);
@ -99,6 +91,24 @@ public class KeyedLock<T> {
} }
private final class ReleasableLock implements Releasable {
final T key;
final KeyLock lock;
final AtomicBoolean closed = new AtomicBoolean();
private ReleasableLock(T key, KeyLock lock) {
this.key = key;
this.lock = lock;
}
@Override
public void close() {
if (closed.compareAndSet(false, true)) {
release(key, lock);
}
}
}
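Since acquire(key) now returns a Releasable, callers switch from paired acquire()/release() to try-with-resources. A hedged usage sketch (the no-arg constructor is assumed from the fair flag shown above):

    KeyedLock<String> locks = new KeyedLock<>();
    try (Releasable ignored = locks.acquire("shard-0")) {
        // critical section, guarded per key
    }
    assert locks.isHeldByCurrentThread("shard-0") == false; // released on close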
@SuppressWarnings("serial") @SuppressWarnings("serial")
private final static class KeyLock extends ReentrantLock { private final static class KeyLock extends ReentrantLock {
KeyLock(boolean fair) { KeyLock(boolean fair) {

View File

@ -52,7 +52,6 @@ public final class IndexingSlowLog implements IndexingOperationListener {
private SlowLogLevel level; private SlowLogLevel level;
private final ESLogger indexLogger; private final ESLogger indexLogger;
private final ESLogger deleteLogger;
private static final String INDEX_INDEXING_SLOWLOG_PREFIX = "index.indexing.slowlog"; private static final String INDEX_INDEXING_SLOWLOG_PREFIX = "index.indexing.slowlog";
public static final Setting<TimeValue> INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING = Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.warn", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); public static final Setting<TimeValue> INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING = Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.warn", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX);
@ -76,16 +75,7 @@ public final class IndexingSlowLog implements IndexingOperationListener {
}, true, Setting.Scope.INDEX); }, true, Setting.Scope.INDEX);
IndexingSlowLog(IndexSettings indexSettings) { IndexingSlowLog(IndexSettings indexSettings) {
this(indexSettings, Loggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".index"), this.indexLogger = Loggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".index", indexSettings.getSettings());
Loggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".delete"));
}
/**
 * Build with the specified loggers. Only used for testing.
*/
IndexingSlowLog(IndexSettings indexSettings, ESLogger indexLogger, ESLogger deleteLogger) {
this.indexLogger = indexLogger;
this.deleteLogger = deleteLogger;
this.index = indexSettings.getIndex(); this.index = indexSettings.getIndex();
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING, this::setReformat); indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING, this::setReformat);
@ -111,7 +101,6 @@ public final class IndexingSlowLog implements IndexingOperationListener {
private void setLevel(SlowLogLevel level) { private void setLevel(SlowLogLevel level) {
this.level = level; this.level = level;
this.indexLogger.setLevel(level.name()); this.indexLogger.setLevel(level.name());
this.deleteLogger.setLevel(level.name());
} }
private void setWarnThreshold(TimeValue warnThreshold) { private void setWarnThreshold(TimeValue warnThreshold) {

View File

@ -20,10 +20,14 @@
package org.elasticsearch.index.fielddata; package org.elasticsearch.index.fielddata;
import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.FieldComparatorSource; import org.apache.lucene.search.FieldComparatorSource;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.SortField; import org.apache.lucene.search.SortField;
import org.apache.lucene.search.Weight; import org.apache.lucene.search.Weight;
@ -122,11 +126,11 @@ public interface IndexFieldData<FD extends AtomicFieldData> extends IndexCompone
public static class Nested { public static class Nested {
private final BitSetProducer rootFilter; private final BitSetProducer rootFilter;
private final Weight innerFilter; private final Query innerQuery;
public Nested(BitSetProducer rootFilter, Weight innerFilter) { public Nested(BitSetProducer rootFilter, Query innerQuery) {
this.rootFilter = rootFilter; this.rootFilter = rootFilter;
this.innerFilter = innerFilter; this.innerQuery = innerQuery;
} }
/** /**
@ -140,7 +144,10 @@ public interface IndexFieldData<FD extends AtomicFieldData> extends IndexCompone
* Get a {@link DocIdSet} that matches the inner documents. * Get a {@link DocIdSet} that matches the inner documents.
*/ */
public DocIdSetIterator innerDocs(LeafReaderContext ctx) throws IOException { public DocIdSetIterator innerDocs(LeafReaderContext ctx) throws IOException {
Scorer s = innerFilter.scorer(ctx); final IndexReaderContext topLevelCtx = ReaderUtil.getTopLevelContext(ctx);
IndexSearcher indexSearcher = new IndexSearcher(topLevelCtx);
Weight weight = indexSearcher.createNormalizedWeight(innerQuery, false);
Scorer s = weight.scorer(ctx);
return s == null ? null : s.iterator(); return s == null ? null : s.iterator();
} }
} }
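Nested now holds the raw inner Query and builds a normalized Weight per innerDocs call against the reader's top-level context, instead of being handed a pre-built Weight. A hedged construction sketch (rootFilter, innerQuery and leafReaderContext stand for values the caller already has):

    Nested nested = new Nested(rootFilter, innerQuery);          // Query, not Weight
    DocIdSetIterator inner = nested.innerDocs(leafReaderContext);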

View File

@ -65,7 +65,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
public static final String LON = "lon"; public static final String LON = "lon";
public static final String LON_SUFFIX = "." + LON; public static final String LON_SUFFIX = "." + LON;
public static final String GEOHASH = "geohash"; public static final String GEOHASH = "geohash";
public static final String GEOHASH_SUFFIX = "." + GEOHASH;
public static final String IGNORE_MALFORMED = "ignore_malformed"; public static final String IGNORE_MALFORMED = "ignore_malformed";
} }

View File

@ -250,7 +250,8 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder<GeoBounding
GeoPoint luceneTopLeft = new GeoPoint(topLeft); GeoPoint luceneTopLeft = new GeoPoint(topLeft);
GeoPoint luceneBottomRight = new GeoPoint(bottomRight); GeoPoint luceneBottomRight = new GeoPoint(bottomRight);
if (GeoValidationMethod.isCoerce(validationMethod)) { final Version indexVersionCreated = context.indexVersionCreated();
if (indexVersionCreated.onOrAfter(Version.V_2_2_0) || GeoValidationMethod.isCoerce(validationMethod)) {
// Special case: if the difference between the left and right is 360 and the right is greater than the left, we are asking for // Special case: if the difference between the left and right is 360 and the right is greater than the left, we are asking for
// the complete longitude range so need to set longitude to the complete longitude range // the complete longitude range so need to set longitude to the complete longitude range
double right = luceneBottomRight.getLon(); double right = luceneBottomRight.getLon();
@ -265,7 +266,6 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder<GeoBounding
} }
} }
final Version indexVersionCreated = context.indexVersionCreated();
if (indexVersionCreated.onOrAfter(Version.V_2_2_0)) { if (indexVersionCreated.onOrAfter(Version.V_2_2_0)) {
// if index created V_2_2 use (soon to be legacy) numeric encoding postings format // if index created V_2_2 use (soon to be legacy) numeric encoding postings format
// if index created V_2_3 > use prefix encoded postings format // if index created V_2_3 > use prefix encoded postings format

View File

@ -219,18 +219,18 @@ public class GeoDistanceQueryBuilder extends AbstractQueryBuilder<GeoDistanceQue
throw new QueryShardException(shardContext, "field [" + fieldName + "] is not a geo_point field"); throw new QueryShardException(shardContext, "field [" + fieldName + "] is not a geo_point field");
} }
final Version indexVersionCreated = shardContext.indexVersionCreated();
QueryValidationException exception = checkLatLon(shardContext.indexVersionCreated().before(Version.V_2_0_0)); QueryValidationException exception = checkLatLon(shardContext.indexVersionCreated().before(Version.V_2_0_0));
if (exception != null) { if (exception != null) {
throw new QueryShardException(shardContext, "couldn't validate latitude/ longitude values", exception); throw new QueryShardException(shardContext, "couldn't validate latitude/ longitude values", exception);
} }
if (GeoValidationMethod.isCoerce(validationMethod)) { if (indexVersionCreated.onOrAfter(Version.V_2_2_0) || GeoValidationMethod.isCoerce(validationMethod)) {
GeoUtils.normalizePoint(center, true, true); GeoUtils.normalizePoint(center, true, true);
} }
double normDistance = geoDistance.normalize(this.distance, DistanceUnit.DEFAULT); double normDistance = geoDistance.normalize(this.distance, DistanceUnit.DEFAULT);
final Version indexVersionCreated = shardContext.indexVersionCreated();
if (indexVersionCreated.before(Version.V_2_2_0)) { if (indexVersionCreated.before(Version.V_2_2_0)) {
GeoPointFieldMapperLegacy.GeoPointFieldType geoFieldType = ((GeoPointFieldMapperLegacy.GeoPointFieldType) fieldType); GeoPointFieldMapperLegacy.GeoPointFieldType geoFieldType = ((GeoPointFieldMapperLegacy.GeoPointFieldType) fieldType);
IndexGeoPointFieldData indexFieldData = shardContext.getForField(fieldType); IndexGeoPointFieldData indexFieldData = shardContext.getForField(fieldType);

View File

@ -120,9 +120,6 @@ public class GeoDistanceQueryParser implements QueryParser<GeoDistanceQueryBuild
} else if (currentFieldName.endsWith(GeoPointFieldMapper.Names.LON_SUFFIX)) { } else if (currentFieldName.endsWith(GeoPointFieldMapper.Names.LON_SUFFIX)) {
point.resetLon(parser.doubleValue()); point.resetLon(parser.doubleValue());
fieldName = currentFieldName.substring(0, currentFieldName.length() - GeoPointFieldMapper.Names.LON_SUFFIX.length()); fieldName = currentFieldName.substring(0, currentFieldName.length() - GeoPointFieldMapper.Names.LON_SUFFIX.length());
} else if (currentFieldName.endsWith(GeoPointFieldMapper.Names.GEOHASH_SUFFIX)) {
point.resetFromGeoHash(parser.text());
fieldName = currentFieldName.substring(0, currentFieldName.length() - GeoPointFieldMapper.Names.GEOHASH_SUFFIX.length());
} else if (parseContext.parseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) { } else if (parseContext.parseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {
queryName = parser.text(); queryName = parser.text();
} else if (parseContext.parseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { } else if (parseContext.parseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {

View File

@ -236,7 +236,7 @@ public class GeoDistanceRangeQueryBuilder extends AbstractQueryBuilder<GeoDistan
} }
GeoPoint point = new GeoPoint(this.point); GeoPoint point = new GeoPoint(this.point);
if (GeoValidationMethod.isCoerce(validationMethod)) { if (indexCreatedBeforeV2_2 == false || GeoValidationMethod.isCoerce(validationMethod)) {
GeoUtils.normalizePoint(point, true, true); GeoUtils.normalizePoint(point, true, true);
} }

View File

@ -196,15 +196,6 @@ public class GeoDistanceRangeQueryParser implements QueryParser<GeoDistanceRange
point = new GeoPoint(); point = new GeoPoint();
} }
point.resetLon(parser.doubleValue()); point.resetLon(parser.doubleValue());
} else if (currentFieldName.endsWith(GeoPointFieldMapper.Names.GEOHASH_SUFFIX)) {
String maybeFieldName = currentFieldName.substring(0, currentFieldName.length() - GeoPointFieldMapper.Names.GEOHASH_SUFFIX.length());
if (fieldName == null || fieldName.equals(maybeFieldName)) {
fieldName = maybeFieldName;
} else {
throw new ParsingException(parser.getTokenLocation(), "[" + GeoDistanceRangeQueryBuilder.NAME +
"] field name already set to [" + fieldName + "] but found [" + currentFieldName + "]");
}
point = GeoPoint.fromGeohash(parser.text());
} else if (parseContext.parseFieldMatcher().match(currentFieldName, NAME_FIELD)) { } else if (parseContext.parseFieldMatcher().match(currentFieldName, NAME_FIELD)) {
queryName = parser.text(); queryName = parser.text();
} else if (parseContext.parseFieldMatcher().match(currentFieldName, BOOST_FIELD)) { } else if (parseContext.parseFieldMatcher().match(currentFieldName, BOOST_FIELD)) {

View File

@ -131,13 +131,13 @@ public class GeoPolygonQueryBuilder extends AbstractQueryBuilder<GeoPolygonQuery
} }
} }
if (GeoValidationMethod.isCoerce(validationMethod)) { final Version indexVersionCreated = context.indexVersionCreated();
if (indexVersionCreated.onOrAfter(Version.V_2_2_0) || GeoValidationMethod.isCoerce(validationMethod)) {
for (GeoPoint point : shell) { for (GeoPoint point : shell) {
GeoUtils.normalizePoint(point, true, true); GeoUtils.normalizePoint(point, true, true);
} }
} }
final Version indexVersionCreated = context.indexVersionCreated();
if (indexVersionCreated.before(Version.V_2_2_0)) { if (indexVersionCreated.before(Version.V_2_2_0)) {
IndexGeoPointFieldData indexFieldData = context.getForField(fieldType); IndexGeoPointFieldData indexFieldData = context.getForField(fieldType);
return new GeoPolygonQuery(indexFieldData, shell.toArray(new GeoPoint[shellSize])); return new GeoPolygonQuery(indexFieldData, shell.toArray(new GeoPoint[shellSize]));

View File

@ -31,7 +31,6 @@ import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.index.query.QueryShardException;
import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException; import java.io.IOException;
@ -61,8 +60,8 @@ public class NestedInnerQueryParseSupport {
protected ObjectMapper nestedObjectMapper; protected ObjectMapper nestedObjectMapper;
private ObjectMapper parentObjectMapper; private ObjectMapper parentObjectMapper;
public NestedInnerQueryParseSupport(XContentParser parser, SearchContext searchContext) { public NestedInnerQueryParseSupport(XContentParser parser, QueryShardContext context) {
shardContext = searchContext.getQueryShardContext(); shardContext = context;
parseContext = shardContext.parseContext(); parseContext = shardContext.parseContext();
shardContext.reset(parser); shardContext.reset(parser);

View File

@ -103,6 +103,7 @@ import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Predicate; import java.util.function.Predicate;
import java.util.stream.Collectors;
import static java.util.Collections.emptyMap; import static java.util.Collections.emptyMap;
import static java.util.Collections.unmodifiableMap; import static java.util.Collections.unmodifiableMap;
@ -185,14 +186,14 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
ExecutorService indicesStopExecutor = Executors.newFixedThreadPool(5, EsExecutors.daemonThreadFactory("indices_shutdown")); ExecutorService indicesStopExecutor = Executors.newFixedThreadPool(5, EsExecutors.daemonThreadFactory("indices_shutdown"));
// Copy indices because we modify it asynchronously in the body of the loop // Copy indices because we modify it asynchronously in the body of the loop
Set<String> indices = new HashSet<>(this.indices.keySet()); final Set<Index> indices = this.indices.values().stream().map(s -> s.index()).collect(Collectors.toSet());
final CountDownLatch latch = new CountDownLatch(indices.size()); final CountDownLatch latch = new CountDownLatch(indices.size());
for (final String index : indices) { for (final Index index : indices) {
indicesStopExecutor.execute(() -> { indicesStopExecutor.execute(() -> {
try { try {
removeIndex(index, "shutdown", false); removeIndex(index, "shutdown", false);
} catch (Throwable e) { } catch (Throwable e) {
logger.warn("failed to remove index on stop [" + index + "]", e); logger.warn("failed to remove index on stop " + index + "", e);
} finally { } finally {
latch.countDown(); latch.countDown();
} }
@ -256,7 +257,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
} }
Map<Index, List<IndexShardStats>> statsByShard = new HashMap<>(); Map<Index, List<IndexShardStats>> statsByShard = new HashMap<>();
for (IndexService indexService : indices.values()) { for (IndexService indexService : this) {
for (IndexShard indexShard : indexService) { for (IndexShard indexShard : indexService) {
try { try {
if (indexShard.routingEntry() == null) { if (indexShard.routingEntry() == null) {
@ -290,17 +291,8 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
return indices.values().iterator(); return indices.values().iterator();
} }
public boolean hasIndex(String index) { public boolean hasIndex(Index index) {
return indices.containsKey(index); return indices.containsKey(index.getUUID());
}
/**
* Returns an IndexService for the specified index if exists otherwise returns <code>null</code>.
*
*/
@Nullable
public IndexService indexService(String index) {
return indices.get(index);
} }
/** /**
@ -309,33 +301,21 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
*/ */
@Nullable @Nullable
public IndexService indexService(Index index) { public IndexService indexService(Index index) {
return indexService(index.getName()); return indices.get(index.getUUID());
}
/**
* Returns an IndexService for the specified index if exists otherwise a {@link IndexNotFoundException} is thrown.
*/
public IndexService indexServiceSafe(String index) {
IndexService indexService = indexService(index);
if (indexService == null) {
throw new IndexNotFoundException(index);
}
return indexService;
} }
/** /**
* Returns an IndexService for the specified index if exists otherwise a {@link IndexNotFoundException} is thrown. * Returns an IndexService for the specified index if exists otherwise a {@link IndexNotFoundException} is thrown.
*/ */
public IndexService indexServiceSafe(Index index) { public IndexService indexServiceSafe(Index index) {
IndexService indexService = indexServiceSafe(index.getName()); IndexService indexService = indices.get(index.getUUID());
if (indexService.indexUUID().equals(index.getUUID()) == false) { if (indexService == null) {
throw new IndexNotFoundException(index); throw new IndexNotFoundException(index);
} }
assert indexService.indexUUID().equals(index.getUUID()) : "uuid mismatch local: " + indexService.indexUUID() + " incoming: " + index.getUUID();
return indexService; return indexService;
} }
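With the map keyed by UUID, an Index carrying the wrong UUID no longer resolves even if the name matches. A hedged sketch of the resulting contract (the two-argument Index constructor and the stale value are illustrative assumptions):

    Index live = indexService.index();                  // name plus the real UUID
    assert indicesService.hasIndex(live);
    Index stale = new Index(live.getName(), "_na_");    // same name, placeholder UUID
    assert indicesService.indexService(stale) == null;  // UUIDs differ -> no hit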
/** /**
* Creates a new {@link IndexService} for the given metadata. * Creates a new {@link IndexService} for the given metadata.
* @param indexMetaData the index metadata to create the index for * @param indexMetaData the index metadata to create the index for
@ -346,10 +326,13 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
if (!lifecycle.started()) { if (!lifecycle.started()) {
throw new IllegalStateException("Can't create an index [" + indexMetaData.getIndex() + "], node is closed"); throw new IllegalStateException("Can't create an index [" + indexMetaData.getIndex() + "], node is closed");
} }
if (indexMetaData.getIndexUUID().equals(IndexMetaData.INDEX_UUID_NA_VALUE)) {
throw new IllegalArgumentException("index must have a real UUID found value: [" + indexMetaData.getIndexUUID() + "]");
}
final Index index = indexMetaData.getIndex(); final Index index = indexMetaData.getIndex();
final Predicate<String> indexNameMatcher = (indexExpression) -> indexNameExpressionResolver.matchesIndex(index.getName(), indexExpression, clusterService.state()); final Predicate<String> indexNameMatcher = (indexExpression) -> indexNameExpressionResolver.matchesIndex(index.getName(), indexExpression, clusterService.state());
final IndexSettings idxSettings = new IndexSettings(indexMetaData, this.settings, indexNameMatcher, indexScopeSetting); final IndexSettings idxSettings = new IndexSettings(indexMetaData, this.settings, indexNameMatcher, indexScopeSetting);
if (indices.containsKey(index.getName())) { if (hasIndex(index)) {
throw new IndexAlreadyExistsException(index); throw new IndexAlreadyExistsException(index);
} }
logger.debug("creating Index [{}], shards [{}]/[{}{}]", logger.debug("creating Index [{}], shards [{}]/[{}{}]",
@ -378,7 +361,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
try { try {
assert indexService.getIndexEventListener() == listener; assert indexService.getIndexEventListener() == listener;
listener.afterIndexCreated(indexService); listener.afterIndexCreated(indexService);
indices = newMapBuilder(indices).put(index.getName(), indexService).immutableMap(); indices = newMapBuilder(indices).put(index.getUUID(), indexService).immutableMap();
success = true; success = true;
return indexService; return indexService;
} finally { } finally {
@ -395,22 +378,24 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
* @param index the index to remove * @param index the index to remove
* @param reason the high level reason causing this removal * @param reason the high level reason causing this removal
*/ */
public void removeIndex(String index, String reason) { public void removeIndex(Index index, String reason) {
removeIndex(index, reason, false); removeIndex(index, reason, false);
} }
private void removeIndex(String index, String reason, boolean delete) { private void removeIndex(Index index, String reason, boolean delete) {
final String indexName = index.getName();
try { try {
final IndexService indexService; final IndexService indexService;
final IndexEventListener listener; final IndexEventListener listener;
synchronized (this) { synchronized (this) {
if (indices.containsKey(index) == false) { if (hasIndex(index) == false) {
return; return;
} }
logger.debug("[{}] closing ... (reason [{}])", index, reason); logger.debug("[{}] closing ... (reason [{}])", indexName, reason);
Map<String, IndexService> newIndices = new HashMap<>(indices); Map<String, IndexService> newIndices = new HashMap<>(indices);
indexService = newIndices.remove(index); indexService = newIndices.remove(index.getUUID());
assert indexService != null : "IndexService is null for index: " + index;
indices = unmodifiableMap(newIndices); indices = unmodifiableMap(newIndices);
listener = indexService.getIndexEventListener(); listener = indexService.getIndexEventListener();
} }
@ -419,9 +404,9 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
if (delete) { if (delete) {
listener.beforeIndexDeleted(indexService); listener.beforeIndexDeleted(indexService);
} }
logger.debug("[{}] closing index service (reason [{}])", index, reason); logger.debug("{} closing index service (reason [{}])", index, reason);
indexService.close(reason, delete); indexService.close(reason, delete);
logger.debug("[{}] closed... (reason [{}])", index, reason); logger.debug("{} closed... (reason [{}])", index, reason);
listener.afterIndexClosed(indexService.index(), indexService.getIndexSettings().getSettings()); listener.afterIndexClosed(indexService.index(), indexService.getIndexSettings().getSettings());
if (delete) { if (delete) {
final IndexSettings indexSettings = indexService.getIndexSettings(); final IndexSettings indexSettings = indexService.getIndexSettings();
@ -474,12 +459,12 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
* Deletes the given index. Persistent parts of the index * Deletes the given index. Persistent parts of the index
* like the shards files, state and transaction logs are removed once all resources are released. * like the shards files, state and transaction logs are removed once all resources are released.
* *
* Equivalent to {@link #removeIndex(String, String)} but fires * Equivalent to {@link #removeIndex(Index, String)} but fires
* different lifecycle events to ensure pending resources of this index are immediately removed. * different lifecycle events to ensure pending resources of this index are immediately removed.
* @param index the index to delete * @param index the index to delete
* @param reason the high level reason causing this delete * @param reason the high level reason causing this delete
*/ */
public void deleteIndex(String index, String reason) throws IOException { public void deleteIndex(Index index, String reason) throws IOException {
removeIndex(index, reason, true); removeIndex(index, reason, true);
} }
@ -505,16 +490,17 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
public void deleteIndexStore(String reason, IndexMetaData metaData, ClusterState clusterState, boolean closed) throws IOException { public void deleteIndexStore(String reason, IndexMetaData metaData, ClusterState clusterState, boolean closed) throws IOException {
if (nodeEnv.hasNodeFile()) { if (nodeEnv.hasNodeFile()) {
synchronized (this) { synchronized (this) {
String indexName = metaData.getIndex().getName(); Index index = metaData.getIndex();
if (indices.containsKey(indexName)) { if (hasIndex(index)) {
String localUUID = indices.get(indexName).indexUUID(); String localUUID = indexService(index).indexUUID();
throw new IllegalStateException("Can't delete index store for [" + indexName + "] - it's still part of the indices service [" + localUUID + "] [" + metaData.getIndexUUID() + "]"); throw new IllegalStateException("Can't delete index store for [" + index.getName() + "] - it's still part of the indices service [" + localUUID + "] [" + metaData.getIndexUUID() + "]");
} }
if (clusterState.metaData().hasIndex(indexName) && (clusterState.nodes().localNode().masterNode() == true)) {
if (clusterState.metaData().hasIndex(index.getName()) && (clusterState.nodes().localNode().masterNode() == true)) {
// we do not delete the store if it is a master eligible node and the index is still in the cluster state // we do not delete the store if it is a master eligible node and the index is still in the cluster state
// because we want to keep the meta data for indices around even if no shards are left here // because we want to keep the meta data for indices around even if no shards are left here
final IndexMetaData index = clusterState.metaData().index(indexName); final IndexMetaData idxMeta = clusterState.metaData().index(index.getName());
throw new IllegalStateException("Can't delete closed index store for [" + indexName + "] - it's still part of the cluster state [" + index.getIndexUUID() + "] [" + metaData.getIndexUUID() + "]"); throw new IllegalStateException("Can't delete closed index store for [" + index.getName() + "] - it's still part of the cluster state [" + idxMeta.getIndexUUID() + "] [" + metaData.getIndexUUID() + "]");
} }
} }
final IndexSettings indexSettings = buildIndexSettings(metaData); final IndexSettings indexSettings = buildIndexSettings(metaData);
@ -607,7 +593,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
* @return true if the index can be deleted on this node * @return true if the index can be deleted on this node
*/ */
public boolean canDeleteIndexContents(Index index, IndexSettings indexSettings, boolean closed) { public boolean canDeleteIndexContents(Index index, IndexSettings indexSettings, boolean closed) {
final IndexService indexService = this.indices.get(index.getName()); final IndexService indexService = indexService(index);
// Closed indices may be deleted, even if they are on a shared // Closed indices may be deleted, even if they are on a shared
// filesystem. Since it is closed we aren't deleting it for relocation // filesystem. Since it is closed we aren't deleting it for relocation
if (indexSettings.isOnSharedFilesystem() == false || closed) { if (indexSettings.isOnSharedFilesystem() == false || closed) {
@ -634,7 +620,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
*/ */
public boolean canDeleteShardContent(ShardId shardId, IndexSettings indexSettings) { public boolean canDeleteShardContent(ShardId shardId, IndexSettings indexSettings) {
assert shardId.getIndex().equals(indexSettings.getIndex()); assert shardId.getIndex().equals(indexSettings.getIndex());
final IndexService indexService = this.indices.get(shardId.getIndexName()); final IndexService indexService = indexService(shardId.getIndex());
if (indexSettings.isOnSharedFilesystem() == false) { if (indexSettings.isOnSharedFilesystem() == false) {
if (indexService != null && nodeEnv.hasNodeFile()) { if (indexService != null && nodeEnv.hasNodeFile()) {
return indexService.hasShard(shardId.id()) == false; return indexService.hasShard(shardId.id()) == false;
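Across these files the pattern is the same: lookups that used to key off the bare index name now go through the full Index handle (name plus UUID), so a recreated index with the same name can no longer be confused with its predecessor. A minimal sketch of the new call shape, assuming an indicesService and a shardId in scope and using only methods that appear in the hunks above:

    // Hedged sketch, not code from this commit: resolve an IndexShard via the
    // full Index handle instead of the index name.
    Index index = shardId.getIndex();
    IndexService indexService = indicesService.indexService(index); // null if the index was already removed
    if (indexService != null) {
        IndexShard shard = indexService.getShardOrNull(shardId.id()); // null if the shard is not allocated on this node
        // ... use the shard, tolerating concurrent removal
    }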
@ -46,6 +46,7 @@ import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.Callback; import org.elasticsearch.common.util.Callback;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.IndexShardAlreadyExistsException; import org.elasticsearch.index.IndexShardAlreadyExistsException;
@ -157,13 +158,13 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
// TODO: this feels a bit hacky here, a block disables state persistence, and then we clean the allocated shards, maybe another flag in blocks? // TODO: this feels a bit hacky here, a block disables state persistence, and then we clean the allocated shards, maybe another flag in blocks?
if (event.state().blocks().disableStatePersistence()) { if (event.state().blocks().disableStatePersistence()) {
for (IndexService indexService : indicesService) { for (IndexService indexService : indicesService) {
String index = indexService.index().getName(); Index index = indexService.index();
for (Integer shardId : indexService.shardIds()) { for (Integer shardId : indexService.shardIds()) {
logger.debug("[{}][{}] removing shard (disabled block persistence)", index, shardId); logger.debug("{}[{}] removing shard (disabled block persistence)", index, shardId);
try { try {
indexService.removeShard(shardId, "removing shard (disabled block persistence)"); indexService.removeShard(shardId, "removing shard (disabled block persistence)");
} catch (Throwable e) { } catch (Throwable e) {
logger.warn("[{}] failed to remove shard (disabled block persistence)", e, index); logger.warn("{} failed to remove shard (disabled block persistence)", e, index);
} }
} }
removeIndex(index, "cleaning index (disabled block persistence)"); removeIndex(index, "cleaning index (disabled block persistence)");
@ -201,10 +202,10 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
} }
} }
for (IndexService indexService : indicesService) { for (IndexService indexService : indicesService) {
String index = indexService.index().getName(); Index index = indexService.index();
if (indexService.shardIds().isEmpty()) { if (indexService.shardIds().isEmpty()) {
if (logger.isDebugEnabled()) { if (logger.isDebugEnabled()) {
logger.debug("[{}] cleaning index (no shards allocated)", index); logger.debug("{} cleaning index (no shards allocated)", index);
} }
// clean the index // clean the index
removeIndex(index, "removing index (no shards allocated)"); removeIndex(index, "removing index (no shards allocated)");
@ -222,12 +223,12 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
if (indexMetaData != null) { if (indexMetaData != null) {
if (!indexMetaData.isSameUUID(indexService.indexUUID())) { if (!indexMetaData.isSameUUID(indexService.indexUUID())) {
logger.debug("[{}] mismatch on index UUIDs between cluster state and local state, cleaning the index so it will be recreated", indexMetaData.getIndex()); logger.debug("[{}] mismatch on index UUIDs between cluster state and local state, cleaning the index so it will be recreated", indexMetaData.getIndex());
deleteIndex(indexMetaData.getIndex().getName(), "mismatch on index UUIDs between cluster state and local state, cleaning the index so it will be recreated"); deleteIndex(indexMetaData.getIndex(), "mismatch on index UUIDs between cluster state and local state, cleaning the index so it will be recreated");
} }
} }
} }
for (String index : event.indicesDeleted()) { for (Index index : event.indicesDeleted()) {
if (logger.isDebugEnabled()) { if (logger.isDebugEnabled()) {
logger.debug("[{}] cleaning index, no longer part of the metadata", index); logger.debug("[{}] cleaning index, no longer part of the metadata", index);
} }
@ -243,7 +244,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
indicesService.deleteClosedIndex("closed index no longer part of the metadata", metaData, event.state()); indicesService.deleteClosedIndex("closed index no longer part of the metadata", metaData, event.state());
} }
try { try {
nodeIndexDeletedAction.nodeIndexDeleted(event.state(), index, indexSettings, localNodeId); nodeIndexDeletedAction.nodeIndexDeleted(event.state(), index.getName(), indexSettings, localNodeId);
} catch (Throwable e) { } catch (Throwable e) {
logger.debug("failed to send to master index {} deleted event", e, index); logger.debug("failed to send to master index {} deleted event", e, index);
} }
@ -298,7 +299,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
return; return;
} }
for (ShardRouting shard : routingNode) { for (ShardRouting shard : routingNode) {
if (!indicesService.hasIndex(shard.getIndexName())) { if (!indicesService.hasIndex(shard.index())) {
final IndexMetaData indexMetaData = event.state().metaData().index(shard.index()); final IndexMetaData indexMetaData = event.state().metaData().index(shard.index());
if (logger.isDebugEnabled()) { if (logger.isDebugEnabled()) {
logger.debug("[{}] creating index", indexMetaData.getIndex()); logger.debug("[{}] creating index", indexMetaData.getIndex());
@ -317,7 +318,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
return; return;
} }
for (IndexMetaData indexMetaData : event.state().metaData()) { for (IndexMetaData indexMetaData : event.state().metaData()) {
if (!indicesService.hasIndex(indexMetaData.getIndex().getName())) { if (!indicesService.hasIndex(indexMetaData.getIndex())) {
// we only create / update here // we only create / update here
continue; continue;
} }
@ -325,7 +326,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
if (!event.indexMetaDataChanged(indexMetaData)) { if (!event.indexMetaDataChanged(indexMetaData)) {
continue; continue;
} }
String index = indexMetaData.getIndex().getName(); Index index = indexMetaData.getIndex();
IndexService indexService = indicesService.indexService(index); IndexService indexService = indicesService.indexService(index);
if (indexService == null) { if (indexService == null) {
// already deleted on us, ignore it // already deleted on us, ignore it
@ -339,12 +340,12 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
private void applyMappings(ClusterChangedEvent event) { private void applyMappings(ClusterChangedEvent event) {
// go over and update mappings // go over and update mappings
for (IndexMetaData indexMetaData : event.state().metaData()) { for (IndexMetaData indexMetaData : event.state().metaData()) {
if (!indicesService.hasIndex(indexMetaData.getIndex().getName())) { Index index = indexMetaData.getIndex();
if (!indicesService.hasIndex(index)) {
// we only create / update here // we only create / update here
continue; continue;
} }
boolean requireRefresh = false; boolean requireRefresh = false;
String index = indexMetaData.getIndex().getName();
IndexService indexService = indicesService.indexService(index); IndexService indexService = indicesService.indexService(index);
if (indexService == null) { if (indexService == null) {
// got deleted on us, ignore (closing the node) // got deleted on us, ignore (closing the node)
@ -357,11 +358,11 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
MappingMetaData mappingMd = cursor.value; MappingMetaData mappingMd = cursor.value;
String mappingType = mappingMd.type(); String mappingType = mappingMd.type();
CompressedXContent mappingSource = mappingMd.source(); CompressedXContent mappingSource = mappingMd.source();
requireRefresh |= processMapping(index, mapperService, mappingType, mappingSource); requireRefresh |= processMapping(index.getName(), mapperService, mappingType, mappingSource);
} }
if (requireRefresh && sendRefreshMapping) { if (requireRefresh && sendRefreshMapping) {
nodeMappingRefreshAction.nodeMappingRefresh(event.state(), nodeMappingRefreshAction.nodeMappingRefresh(event.state(),
new NodeMappingRefreshAction.NodeMappingRefreshRequest(index, indexMetaData.getIndexUUID(), new NodeMappingRefreshAction.NodeMappingRefreshRequest(index.getName(), indexMetaData.getIndexUUID(),
event.state().nodes().localNodeId()) event.state().nodes().localNodeId())
); );
} }
@ -727,7 +728,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
} }
} }
private void removeIndex(String index, String reason) { private void removeIndex(Index index, String reason) {
try { try {
indicesService.removeIndex(index, reason); indicesService.removeIndex(index, reason);
} catch (Throwable e) { } catch (Throwable e) {
@ -735,7 +736,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
} }
} }
private void deleteIndex(String index, String reason) { private void deleteIndex(Index index, String reason) {
try { try {
indicesService.deleteIndex(index, reason); indicesService.deleteIndex(index, reason);
} catch (Throwable e) { } catch (Throwable e) {
@ -774,7 +775,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
private class FailedShardHandler implements Callback<IndexShard.ShardFailure> { private class FailedShardHandler implements Callback<IndexShard.ShardFailure> {
@Override @Override
public void handle(final IndexShard.ShardFailure shardFailure) { public void handle(final IndexShard.ShardFailure shardFailure) {
final IndexService indexService = indicesService.indexService(shardFailure.routing.shardId().getIndex().getName()); final IndexService indexService = indicesService.indexService(shardFailure.routing.shardId().getIndex());
final ShardRouting shardRouting = shardFailure.routing; final ShardRouting shardRouting = shardFailure.routing;
threadPool.generic().execute(() -> { threadPool.generic().execute(() -> {
synchronized (mutex) { synchronized (mutex) {
@ -83,7 +83,7 @@ public class RecoverySource extends AbstractComponent implements IndexEventListe
} }
private RecoveryResponse recover(final StartRecoveryRequest request) throws IOException { private RecoveryResponse recover(final StartRecoveryRequest request) throws IOException {
final IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex().getName()); final IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
final IndexShard shard = indexService.getShard(request.shardId().id()); final IndexShard shard = indexService.getShard(request.shardId().id());
// starting recovery from that our (the source) shard state is marking the shard to be in recovery mode as well, otherwise // starting recovery from that our (the source) shard state is marking the shard to be in recovery mode as well, otherwise
@ -137,7 +137,7 @@ public class RecoverySourceHandler {
} }
} }
logger.trace("snapshot translog for recovery. current size is [{}]", translogView.totalOperations()); logger.trace("{} snapshot translog for recovery. current size is [{}]", shard.shardId(), translogView.totalOperations());
try { try {
phase2(translogView.snapshot()); phase2(translogView.snapshot());
} catch (Throwable e) { } catch (Throwable e) {
@ -348,7 +348,7 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe
return null; return null;
} }
ShardId shardId = request.shardId; ShardId shardId = request.shardId;
IndexService indexService = indicesService.indexService(shardId.getIndexName()); IndexService indexService = indicesService.indexService(shardId.getIndex());
if (indexService != null && indexService.indexUUID().equals(request.indexUUID)) { if (indexService != null && indexService.indexUUID().equals(request.indexUUID)) {
return indexService.getShardOrNull(shardId.id()); return indexService.getShardOrNull(shardId.id());
} }
@ -126,7 +126,7 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesAction<T
@Override @Override
protected NodeStoreFilesMetaData nodeOperation(NodeRequest request) { protected NodeStoreFilesMetaData nodeOperation(NodeRequest request) {
if (request.unallocated) { if (request.unallocated) {
IndexService indexService = indicesService.indexService(request.shardId.getIndexName()); IndexService indexService = indicesService.indexService(request.shardId.getIndex());
if (indexService == null) { if (indexService == null) {
return new NodeStoreFilesMetaData(clusterService.localNode(), null); return new NodeStoreFilesMetaData(clusterService.localNode(), null);
} }
@ -150,7 +150,7 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesAction<T
long startTimeNS = System.nanoTime(); long startTimeNS = System.nanoTime();
boolean exists = false; boolean exists = false;
try { try {
IndexService indexService = indicesService.indexService(shardId.getIndexName()); IndexService indexService = indicesService.indexService(shardId.getIndex());
if (indexService != null) { if (indexService != null) {
IndexShard indexShard = indexService.getShardOrNull(shardId.id()); IndexShard indexShard = indexService.getShardOrNull(shardId.id());
if (indexShard != null) { if (indexShard != null) {
@ -0,0 +1,171 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.ingest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
public class IngestStats implements Writeable<IngestStats>, ToXContent {
public final static IngestStats PROTO = new IngestStats(null, null);
private final Stats totalStats;
private final Map<String, Stats> statsPerPipeline;
public IngestStats(Stats totalStats, Map<String, Stats> statsPerPipeline) {
this.totalStats = totalStats;
this.statsPerPipeline = statsPerPipeline;
}
/**
* @return The accumulated stats for all pipelines
*/
public Stats getTotalStats() {
return totalStats;
}
/**
* @return The stats on a per pipeline basis
*/
public Map<String, Stats> getStatsPerPipeline() {
return statsPerPipeline;
}
@Override
public IngestStats readFrom(StreamInput in) throws IOException {
Stats totalStats = Stats.PROTO.readFrom(in);
int size = in.readVInt();
Map<String, Stats> statsPerPipeline = new HashMap<>(size);
for (int i = 0; i < size; i++) {
String pipelineId = in.readString(); // writeTo writes the key before the value, so read it first
statsPerPipeline.put(pipelineId, Stats.PROTO.readFrom(in));
}
return new IngestStats(totalStats, statsPerPipeline);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
totalStats.writeTo(out);
out.writeVInt(statsPerPipeline.size()); // must mirror the readVInt in readFrom
for (Map.Entry<String, Stats> entry : statsPerPipeline.entrySet()) {
out.writeString(entry.getKey());
entry.getValue().writeTo(out);
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject("ingest");
builder.startObject("total");
totalStats.toXContent(builder, params);
builder.endObject();
builder.startObject("pipelines");
for (Map.Entry<String, Stats> entry : statsPerPipeline.entrySet()) {
builder.startObject(entry.getKey());
entry.getValue().toXContent(builder, params);
builder.endObject();
}
builder.endObject();
builder.endObject();
return builder;
}
public static class Stats implements Writeable<Stats>, ToXContent {
private final static Stats PROTO = new Stats(0, 0, 0, 0);
private final long ingestCount;
private final long ingestTimeInMillis;
private final long ingestCurrent;
private final long ingestFailedCount;
public Stats(long ingestCount, long ingestTimeInMillis, long ingestCurrent, long ingestFailedCount) {
this.ingestCount = ingestCount;
this.ingestTimeInMillis = ingestTimeInMillis;
this.ingestCurrent = ingestCurrent;
this.ingestFailedCount = ingestFailedCount;
}
/**
* @return The total number of executed ingest preprocessing operations.
*/
public long getIngestCount() {
return ingestCount;
}
/**
*
* @return The total time spent on ingest preprocessing, in millis.
*/
public long getIngestTimeInMillis() {
return ingestTimeInMillis;
}
/**
* @return The total number of ingest preprocessing operations currently executing.
*/
public long getIngestCurrent() {
return ingestCurrent;
}
/**
* @return The total number of ingest preprocessing operations that have failed.
*/
public long getIngestFailedCount() {
return ingestFailedCount;
}
@Override
public Stats readFrom(StreamInput in) throws IOException {
long ingestCount = in.readVLong();
long ingestTimeInMillis = in.readVLong();
long ingestCurrent = in.readVLong();
long ingestFailedCount = in.readVLong();
return new Stats(ingestCount, ingestTimeInMillis, ingestCurrent, ingestFailedCount);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVLong(ingestCount);
out.writeVLong(ingestTimeInMillis);
out.writeVLong(ingestCurrent);
out.writeVLong(ingestFailedCount);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field("count", ingestCount);
builder.timeValueField("time_in_millis", "time", ingestTimeInMillis, TimeUnit.MILLISECONDS);
builder.field("current", ingestCurrent);
builder.field("failed", ingestFailedCount);
return builder;
}
}
}
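For orientation, a hedged sketch of how the class above fits together; the numbers are invented and only the constructors and toXContent logic shown above are assumed:

    // Illustrative only: build an IngestStats by hand.
    IngestStats.Stats totals = new IngestStats.Stats(42, 1250, 0, 3); // count, time_in_millis, current, failed
    IngestStats stats = new IngestStats(totals,
        java.util.Collections.singletonMap("my-pipeline", new IngestStats.Stats(42, 1250, 0, 3)));
    // toXContent then renders, per the method above:
    //   "ingest": {
    //     "total":     { "count": 42, "time_in_millis": 1250, "current": 0, "failed": 3 },
    //     "pipelines": { "my-pipeline": { ...same four fields... } }
    //   }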
@ -19,23 +19,36 @@
package org.elasticsearch.ingest; package org.elasticsearch.ingest;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.common.metrics.CounterMetric;
import org.elasticsearch.common.metrics.MeanMetric;
import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.ingest.core.IngestDocument; import org.elasticsearch.ingest.core.IngestDocument;
import org.elasticsearch.ingest.core.Pipeline; import org.elasticsearch.ingest.core.Pipeline;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map; import java.util.Map;
import java.util.Optional;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer; import java.util.function.BiConsumer;
import java.util.function.Consumer; import java.util.function.Consumer;
public class PipelineExecutionService { public class PipelineExecutionService implements ClusterStateListener {
private final PipelineStore store; private final PipelineStore store;
private final ThreadPool threadPool; private final ThreadPool threadPool;
private final StatsHolder totalStats = new StatsHolder();
private volatile Map<String, StatsHolder> statsHolderPerPipeline = Collections.emptyMap();
public PipelineExecutionService(PipelineStore store, ThreadPool threadPool) { public PipelineExecutionService(PipelineStore store, ThreadPool threadPool) {
this.store = store; this.store = store;
this.threadPool = threadPool; this.threadPool = threadPool;
@ -89,29 +102,85 @@ public class PipelineExecutionService {
}); });
} }
private void innerExecute(IndexRequest indexRequest, Pipeline pipeline) throws Exception {
String index = indexRequest.index();
String type = indexRequest.type();
String id = indexRequest.id();
String routing = indexRequest.routing();
String parent = indexRequest.parent();
String timestamp = indexRequest.timestamp();
String ttl = indexRequest.ttl() == null ? null : indexRequest.ttl().toString();
Map<String, Object> sourceAsMap = indexRequest.sourceAsMap();
IngestDocument ingestDocument = new IngestDocument(index, type, id, routing, parent, timestamp, ttl, sourceAsMap);
pipeline.execute(ingestDocument);
Map<IngestDocument.MetaData, String> metadataMap = ingestDocument.extractMetadata();
//it's fine to set all metadata fields all the time, as ingest document holds their starting values
//before ingestion, which might also get modified during ingestion.
indexRequest.index(metadataMap.get(IngestDocument.MetaData.INDEX));
indexRequest.type(metadataMap.get(IngestDocument.MetaData.TYPE));
indexRequest.id(metadataMap.get(IngestDocument.MetaData.ID));
indexRequest.routing(metadataMap.get(IngestDocument.MetaData.ROUTING));
indexRequest.parent(metadataMap.get(IngestDocument.MetaData.PARENT));
indexRequest.timestamp(metadataMap.get(IngestDocument.MetaData.TIMESTAMP));
indexRequest.ttl(metadataMap.get(IngestDocument.MetaData.TTL));
indexRequest.source(ingestDocument.getSourceAndMetadata());
public IngestStats stats() {
Map<String, StatsHolder> statsHolderPerPipeline = this.statsHolderPerPipeline;
Map<String, IngestStats.Stats> statsPerPipeline = new HashMap<>(statsHolderPerPipeline.size());
for (Map.Entry<String, StatsHolder> entry : statsHolderPerPipeline.entrySet()) {
statsPerPipeline.put(entry.getKey(), entry.getValue().createStats());
}
return new IngestStats(totalStats.createStats(), statsPerPipeline);
}
@Override
public void clusterChanged(ClusterChangedEvent event) {
IngestMetadata ingestMetadata = event.state().getMetaData().custom(IngestMetadata.TYPE);
if (ingestMetadata != null) {
updatePipelineStats(ingestMetadata);
}
}
void updatePipelineStats(IngestMetadata ingestMetadata) {
boolean changed = false;
Map<String, StatsHolder> newStatsPerPipeline = new HashMap<>(statsHolderPerPipeline);
// drop stats for pipelines that no longer exist; removeIf avoids the
// ConcurrentModificationException that removing during a for-each would throw
if (newStatsPerPipeline.keySet().removeIf(pipeline -> ingestMetadata.getPipelines().containsKey(pipeline) == false)) {
changed = true;
}
for (String pipeline : ingestMetadata.getPipelines().keySet()) {
if (newStatsPerPipeline.containsKey(pipeline) == false) {
newStatsPerPipeline.put(pipeline, new StatsHolder());
changed = true;
}
}
if (changed) {
statsHolderPerPipeline = Collections.unmodifiableMap(newStatsPerPipeline);
}
}
private void innerExecute(IndexRequest indexRequest, Pipeline pipeline) throws Exception {
long startTimeInNanos = System.nanoTime();
// the pipeline specific stat holder may not exist and that is fine:
// (e.g. the pipeline may have been removed while we're ingesting a document)
Optional<StatsHolder> pipelineStats = Optional.ofNullable(statsHolderPerPipeline.get(pipeline.getId()));
try {
totalStats.preIngest();
pipelineStats.ifPresent(StatsHolder::preIngest);
String index = indexRequest.index();
String type = indexRequest.type();
String id = indexRequest.id();
String routing = indexRequest.routing();
String parent = indexRequest.parent();
String timestamp = indexRequest.timestamp();
String ttl = indexRequest.ttl() == null ? null : indexRequest.ttl().toString();
Map<String, Object> sourceAsMap = indexRequest.sourceAsMap();
IngestDocument ingestDocument = new IngestDocument(index, type, id, routing, parent, timestamp, ttl, sourceAsMap);
pipeline.execute(ingestDocument);
Map<IngestDocument.MetaData, String> metadataMap = ingestDocument.extractMetadata();
//it's fine to set all metadata fields all the time, as ingest document holds their starting values
//before ingestion, which might also get modified during ingestion.
indexRequest.index(metadataMap.get(IngestDocument.MetaData.INDEX));
indexRequest.type(metadataMap.get(IngestDocument.MetaData.TYPE));
indexRequest.id(metadataMap.get(IngestDocument.MetaData.ID));
indexRequest.routing(metadataMap.get(IngestDocument.MetaData.ROUTING));
indexRequest.parent(metadataMap.get(IngestDocument.MetaData.PARENT));
indexRequest.timestamp(metadataMap.get(IngestDocument.MetaData.TIMESTAMP));
indexRequest.ttl(metadataMap.get(IngestDocument.MetaData.TTL));
indexRequest.source(ingestDocument.getSourceAndMetadata());
} catch (Exception e) {
totalStats.ingestFailed();
pipelineStats.ifPresent(StatsHolder::ingestFailed);
throw e;
} finally {
long ingestTimeInMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTimeInNanos);
totalStats.postIngest(ingestTimeInMillis);
pipelineStats.ifPresent(statsHolder -> statsHolder.postIngest(ingestTimeInMillis));
}
} }
private Pipeline getPipeline(String pipelineId) { private Pipeline getPipeline(String pipelineId) {
@ -121,4 +190,30 @@ public class PipelineExecutionService {
} }
return pipeline; return pipeline;
} }
static class StatsHolder {
private final MeanMetric ingestMetric = new MeanMetric();
private final CounterMetric ingestCurrent = new CounterMetric();
private final CounterMetric ingestFailed = new CounterMetric();
void preIngest() {
ingestCurrent.inc();
}
void postIngest(long ingestTimeInMillis) {
ingestCurrent.dec();
ingestMetric.inc(ingestTimeInMillis);
}
void ingestFailed() {
ingestFailed.inc();
}
IngestStats.Stats createStats() {
return new IngestStats.Stats(ingestMetric.count(), ingestMetric.sum(), ingestCurrent.count(), ingestFailed.count());
}
}
} }
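The StatsHolder above is driven entirely by the try/catch/finally in innerExecute. A condensed restatement of that lifecycle, using only the holder's own methods:

    // Condensed restatement of the accounting pattern in innerExecute.
    StatsHolder holder = new StatsHolder();
    long start = System.nanoTime();
    holder.preIngest();                                 // ingestCurrent++
    try {
        // ... execute the pipeline ...
    } catch (Exception e) {
        holder.ingestFailed();                          // failures are counted, then rethrown
        throw e;
    } finally {
        // current is always decremented and elapsed time recorded, success or failure
        holder.postIngest(TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));
    }
    IngestStats.Stats snapshot = holder.createStats();  // immutable point-in-time copy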
@ -34,8 +34,10 @@ import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.ClusterStateObserver;
import org.elasticsearch.cluster.MasterNodeChangePredicate; import org.elasticsearch.cluster.MasterNodeChangePredicate;
import org.elasticsearch.cluster.NodeConnectionsService;
import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodeService;
import org.elasticsearch.cluster.routing.RoutingService; import org.elasticsearch.cluster.routing.RoutingService;
import org.elasticsearch.cluster.service.InternalClusterService; import org.elasticsearch.cluster.service.InternalClusterService;
import org.elasticsearch.common.StopWatch; import org.elasticsearch.common.StopWatch;
@ -294,6 +296,10 @@ public class Node implements Closeable {
"node cluster service implementation must inherit from InternalClusterService"; "node cluster service implementation must inherit from InternalClusterService";
final InternalClusterService clusterService = (InternalClusterService) injector.getInstance(ClusterService.class); final InternalClusterService clusterService = (InternalClusterService) injector.getInstance(ClusterService.class);
final NodeConnectionsService nodeConnectionsService = injector.getInstance(NodeConnectionsService.class);
nodeConnectionsService.start();
clusterService.setNodeConnectionsService(nodeConnectionsService);
// TODO hack around circular dependencies problems // TODO hack around circular dependencies problems
injector.getInstance(GatewayAllocator.class).setReallocation(clusterService, injector.getInstance(RoutingService.class)); injector.getInstance(GatewayAllocator.class).setReallocation(clusterService, injector.getInstance(RoutingService.class));
@ -311,6 +317,15 @@ public class Node implements Closeable {
// Start the transport service now so the publish address will be added to the local disco node in ClusterService // Start the transport service now so the publish address will be added to the local disco node in ClusterService
TransportService transportService = injector.getInstance(TransportService.class); TransportService transportService = injector.getInstance(TransportService.class);
transportService.start(); transportService.start();
DiscoveryNode localNode = injector.getInstance(DiscoveryNodeService.class)
.buildLocalNode(transportService.boundAddress().publishAddress());
// TODO: need to find a cleaner way to start/construct a service with some initial parameters,
// playing nice with the life cycle interfaces
clusterService.setLocalNode(localNode);
transportService.setLocalNode(localNode);
clusterService.add(transportService.getTaskManager());
clusterService.start(); clusterService.start();
// start after cluster service so the local disco is known // start after cluster service so the local disco is known
@ -392,6 +407,7 @@ public class Node implements Closeable {
injector.getInstance(RoutingService.class).stop(); injector.getInstance(RoutingService.class).stop();
injector.getInstance(ClusterService.class).stop(); injector.getInstance(ClusterService.class).stop();
injector.getInstance(Discovery.class).stop(); injector.getInstance(Discovery.class).stop();
injector.getInstance(NodeConnectionsService.class).stop();
injector.getInstance(MonitorService.class).stop(); injector.getInstance(MonitorService.class).stop();
injector.getInstance(GatewayService.class).stop(); injector.getInstance(GatewayService.class).stop();
injector.getInstance(SearchService.class).stop(); injector.getInstance(SearchService.class).stop();
@ -449,6 +465,8 @@ public class Node implements Closeable {
toClose.add(injector.getInstance(RoutingService.class)); toClose.add(injector.getInstance(RoutingService.class));
toClose.add(() -> stopWatch.stop().start("cluster")); toClose.add(() -> stopWatch.stop().start("cluster"));
toClose.add(injector.getInstance(ClusterService.class)); toClose.add(injector.getInstance(ClusterService.class));
toClose.add(() -> stopWatch.stop().start("node_connections_service"));
toClose.add(injector.getInstance(NodeConnectionsService.class));
toClose.add(() -> stopWatch.stop().start("discovery")); toClose.add(() -> stopWatch.stop().start("discovery"));
toClose.add(injector.getInstance(Discovery.class)); toClose.add(injector.getInstance(Discovery.class));
toClose.add(() -> stopWatch.stop().start("monitor")); toClose.add(() -> stopWatch.stop().start("monitor"));
@ -90,6 +90,7 @@ public class NodeService extends AbstractComponent implements Closeable {
this.ingestService = new IngestService(settings, threadPool, processorsRegistryBuilder); this.ingestService = new IngestService(settings, threadPool, processorsRegistryBuilder);
this.settingsFilter = settingsFilter; this.settingsFilter = settingsFilter;
clusterService.add(ingestService.getPipelineStore()); clusterService.add(ingestService.getPipelineStore());
clusterService.add(ingestService.getPipelineExecutionService());
} }
// can not use constructor injection or there will be a circular dependency // can not use constructor injection or there will be a circular dependency
@ -165,13 +166,14 @@ public class NodeService extends AbstractComponent implements Closeable {
httpServer == null ? null : httpServer.stats(), httpServer == null ? null : httpServer.stats(),
circuitBreakerService.stats(), circuitBreakerService.stats(),
scriptService.stats(), scriptService.stats(),
discovery.stats() discovery.stats(),
ingestService.getPipelineExecutionService().stats()
); );
} }
public NodeStats stats(CommonStatsFlags indices, boolean os, boolean process, boolean jvm, boolean threadPool, public NodeStats stats(CommonStatsFlags indices, boolean os, boolean process, boolean jvm, boolean threadPool,
boolean fs, boolean transport, boolean http, boolean circuitBreaker, boolean fs, boolean transport, boolean http, boolean circuitBreaker,
boolean script, boolean discoveryStats) { boolean script, boolean discoveryStats, boolean ingest) {
// for indices stats we want to include previous allocated shards stats as well (it will // for indices stats we want to include previous allocated shards stats as well (it will
// only be applied to the sensible ones to use, like refresh/merge/flush/indexing stats) // only be applied to the sensible ones to use, like refresh/merge/flush/indexing stats)
return new NodeStats(discovery.localNode(), System.currentTimeMillis(), return new NodeStats(discovery.localNode(), System.currentTimeMillis(),
@ -185,7 +187,8 @@ public class NodeService extends AbstractComponent implements Closeable {
http ? (httpServer == null ? null : httpServer.stats()) : null, http ? (httpServer == null ? null : httpServer.stats()) : null,
circuitBreaker ? circuitBreakerService.stats() : null, circuitBreaker ? circuitBreakerService.stats() : null,
script ? scriptService.stats() : null, script ? scriptService.stats() : null,
discoveryStats ? discovery.stats() : null discoveryStats ? discovery.stats() : null,
ingest ? ingestService.getPipelineExecutionService().stats() : null
); );
} }
@ -81,6 +81,7 @@ public class RestNodesStatsAction extends BaseRestHandler {
nodesStatsRequest.breaker(metrics.contains("breaker")); nodesStatsRequest.breaker(metrics.contains("breaker"));
nodesStatsRequest.script(metrics.contains("script")); nodesStatsRequest.script(metrics.contains("script"));
nodesStatsRequest.discovery(metrics.contains("discovery")); nodesStatsRequest.discovery(metrics.contains("discovery"));
nodesStatsRequest.ingest(metrics.contains("ingest"));
// check for index specific metrics // check for index specific metrics
if (metrics.contains("indices")) { if (metrics.contains("indices")) {
@ -113,6 +114,6 @@ public class RestNodesStatsAction extends BaseRestHandler {
nodesStatsRequest.indices().includeSegmentFileSizes(true); nodesStatsRequest.indices().includeSegmentFileSizes(true);
} }
client.admin().cluster().nodesStats(nodesStatsRequest, new RestToXContentListener<NodesStatsResponse>(channel)); client.admin().cluster().nodesStats(nodesStatsRequest, new RestToXContentListener<>(channel));
} }
} }
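With the "ingest" metric wired into the REST handler above, a transport client can request it explicitly. A hedged sketch; ingest(boolean) is the setter this commit adds, while clear() and the surrounding API are assumed from the existing NodesStatsRequest:

    // Hedged sketch: fetch only ingest stats from all nodes.
    NodesStatsRequest request = new NodesStatsRequest(); // no node ids = all nodes
    request.clear();                                     // assumed: switches every metric off first
    request.ingest(true);                                // the flag added in this commit
    client.admin().cluster().nodesStats(request, listener); // client and listener assumed in scope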
@ -55,7 +55,6 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.env.Environment; import org.elasticsearch.env.Environment;
import org.elasticsearch.index.query.TemplateQueryParser; import org.elasticsearch.index.query.TemplateQueryParser;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.search.lookup.SearchLookup;
import org.elasticsearch.watcher.FileChangesListener; import org.elasticsearch.watcher.FileChangesListener;
import org.elasticsearch.watcher.FileWatcher; import org.elasticsearch.watcher.FileWatcher;
@ -225,6 +224,8 @@ public class ScriptService extends AbstractComponent implements Closeable {
return scriptEngineService; return scriptEngineService;
} }
/** /**
* Checks if a script can be executed and compiles it if needed, or returns the previously compiled and cached script. * Checks if a script can be executed and compiles it if needed, or returns the previously compiled and cached script.
*/ */
@ -516,46 +517,53 @@ public class ScriptService extends AbstractComponent implements Closeable {
private class ScriptChangesListener extends FileChangesListener { private class ScriptChangesListener extends FileChangesListener {
private Tuple<String, String> scriptNameExt(Path file) { private Tuple<String, String> getScriptNameExt(Path file) {
Path scriptPath = scriptsDirectory.relativize(file); Path scriptPath = scriptsDirectory.relativize(file);
int extIndex = scriptPath.toString().lastIndexOf('.'); int extIndex = scriptPath.toString().lastIndexOf('.');
if (extIndex != -1) { if (extIndex <= 0) {
String ext = scriptPath.toString().substring(extIndex + 1);
String scriptName = scriptPath.toString().substring(0, extIndex).replace(scriptPath.getFileSystem().getSeparator(), "_");
return new Tuple<>(scriptName, ext);
} else {
return null; return null;
} }
String ext = scriptPath.toString().substring(extIndex + 1);
if (ext.isEmpty()) {
return null;
}
String scriptName = scriptPath.toString().substring(0, extIndex).replace(scriptPath.getFileSystem().getSeparator(), "_");
return new Tuple<>(scriptName, ext);
} }
@Override @Override
public void onFileInit(Path file) { public void onFileInit(Path file) {
Tuple<String, String> scriptNameExt = getScriptNameExt(file);
if (scriptNameExt == null) {
logger.debug("Skipped script with invalid extension : [{}]", file);
return;
}
if (logger.isTraceEnabled()) { if (logger.isTraceEnabled()) {
logger.trace("Loading script file : [{}]", file); logger.trace("Loading script file : [{}]", file);
} }
Tuple<String, String> scriptNameExt = scriptNameExt(file);
if (scriptNameExt != null) {
ScriptEngineService engineService = getScriptEngineServiceForFileExt(scriptNameExt.v2());
if (engineService == null) {
logger.warn("no script engine found for [{}]", scriptNameExt.v2());
} else {
try {
//we don't know yet what the script will be used for, but if all of the operations for this lang
// with file scripts are disabled, it makes no sense to even compile it and cache it.
if (isAnyScriptContextEnabled(engineService.getTypes().get(0), engineService, ScriptType.FILE)) {
logger.info("compiling script file [{}]", file.toAbsolutePath());
try(InputStreamReader reader = new InputStreamReader(Files.newInputStream(file), StandardCharsets.UTF_8)) {
String script = Streams.copyToString(reader);
CacheKey cacheKey = new CacheKey(engineService, scriptNameExt.v1(), null, Collections.emptyMap());
staticCache.put(cacheKey, new CompiledScript(ScriptType.FILE, scriptNameExt.v1(), engineService.getTypes().get(0), engineService.compile(script, Collections.emptyMap())));
scriptMetrics.onCompilation();
}
} else {
logger.warn("skipping compile of script file [{}] as all scripted operations are disabled for file scripts", file.toAbsolutePath());
}
} catch (Throwable e) {
logger.warn("failed to load/compile script [{}]", e, scriptNameExt.v1());
}
}
}
}
ScriptEngineService engineService = getScriptEngineServiceForFileExt(scriptNameExt.v2());
if (engineService == null) {
logger.warn("No script engine found for [{}]", scriptNameExt.v2());
} else {
try {
//we don't know yet what the script will be used for, but if all of the operations for this lang
// with file scripts are disabled, it makes no sense to even compile it and cache it.
if (isAnyScriptContextEnabled(engineService.getTypes().get(0), engineService, ScriptType.FILE)) {
logger.info("compiling script file [{}]", file.toAbsolutePath());
try (InputStreamReader reader = new InputStreamReader(Files.newInputStream(file), StandardCharsets.UTF_8)) {
String script = Streams.copyToString(reader);
CacheKey cacheKey = new CacheKey(engineService, scriptNameExt.v1(), null, Collections.emptyMap());
staticCache.put(cacheKey, new CompiledScript(ScriptType.FILE, scriptNameExt.v1(), engineService.getTypes().get(0), engineService.compile(script, Collections.emptyMap())));
scriptMetrics.onCompilation();
}
} else {
logger.warn("skipping compile of script file [{}] as all scripted operations are disabled for file scripts", file.toAbsolutePath());
}
} catch (Throwable e) {
logger.warn("failed to load/compile script [{}]", e, scriptNameExt.v1());
}
}
}
@ -567,7 +575,7 @@ public class ScriptService extends AbstractComponent implements Closeable {
@Override @Override
public void onFileDeleted(Path file) { public void onFileDeleted(Path file) {
Tuple<String, String> scriptNameExt = scriptNameExt(file); Tuple<String, String> scriptNameExt = getScriptNameExt(file);
if (scriptNameExt != null) { if (scriptNameExt != null) {
ScriptEngineService engineService = getScriptEngineServiceForFileExt(scriptNameExt.v2()); ScriptEngineService engineService = getScriptEngineServiceForFileExt(scriptNameExt.v2());
assert engineService != null; assert engineService != null;
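Besides the rename, getScriptNameExt above tightens the guard from extIndex != -1 to extIndex <= 0 and rejects empty extensions. A standalone restatement of that logic in plain java.nio, runnable outside Elasticsearch (the class and names are invented for illustration):

    import java.nio.file.Path;
    import java.nio.file.Paths;

    public class ScriptNameExtDemo {
        // Mirrors getScriptNameExt, with the ES Tuple replaced by a String[] pair.
        static String[] scriptNameExt(Path scriptsDirectory, Path file) {
            Path scriptPath = scriptsDirectory.relativize(file);
            int extIndex = scriptPath.toString().lastIndexOf('.');
            if (extIndex <= 0) {
                return null; // no dot, or a top-level dot file like ".hidden": skipped
            }
            String ext = scriptPath.toString().substring(extIndex + 1);
            if (ext.isEmpty()) {
                return null; // trailing dot, e.g. "broken.": skipped
            }
            String name = scriptPath.toString().substring(0, extIndex)
                    .replace(scriptPath.getFileSystem().getSeparator(), "_");
            return new String[] { name, ext };
        }

        public static void main(String[] args) {
            Path scripts = Paths.get("config/scripts");
            // nested directories become underscores: prints [group_calc, groovy]
            System.out.println(java.util.Arrays.toString(scriptNameExt(scripts, scripts.resolve("group/calc.groovy"))));
            System.out.println(scriptNameExt(scripts, scripts.resolve(".hidden"))); // prints null
        }
    }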
@ -45,7 +45,7 @@ public class SearchException extends ElasticsearchException implements Elasticse
public SearchException(StreamInput in) throws IOException { public SearchException(StreamInput in) throws IOException {
super(in); super(in);
if (in.readBoolean()) { if (in.readBoolean()) {
shardTarget = SearchShardTarget.readSearchShardTarget(in); shardTarget = new SearchShardTarget(in);
} else { } else {
shardTarget = null; shardTarget = null;
} }
@ -54,7 +54,12 @@ public class SearchException extends ElasticsearchException implements Elasticse
@Override @Override
public void writeTo(StreamOutput out) throws IOException { public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out); super.writeTo(out);
out.writeOptionalStreamable(shardTarget); if (shardTarget == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
shardTarget.writeTo(out);
}
} }
public SearchShardTarget shard() { public SearchShardTarget shard() {
@ -531,10 +531,9 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
} }
final SearchContext createContext(ShardSearchRequest request, @Nullable Engine.Searcher searcher) { final SearchContext createContext(ShardSearchRequest request, @Nullable Engine.Searcher searcher) {
IndexService indexService = indicesService.indexServiceSafe(request.index()); IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
IndexShard indexShard = indexService.getShard(request.shardId()); IndexShard indexShard = indexService.getShard(request.shardId().getId());
SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().id(), indexShard.shardId());
SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().id(), indexShard.shardId().getIndex(), request.shardId());
Engine.Searcher engineSearcher = searcher == null ? indexShard.acquireSearcher("search") : searcher; Engine.Searcher engineSearcher = searcher == null ? indexShard.acquireSearcher("search") : searcher;
@ -23,28 +23,38 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.text.Text; import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.Index; import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
import java.io.IOException; import java.io.IOException;
/** /**
* The target that the search request was executed on. * The target that the search request was executed on.
*/ */
public class SearchShardTarget implements Streamable, Comparable<SearchShardTarget> { public class SearchShardTarget implements Writeable<SearchShardTarget>, Comparable<SearchShardTarget> {
private Text nodeId; private Text nodeId;
private Text index; private Text index;
private int shardId; private ShardId shardId;
private SearchShardTarget() { public SearchShardTarget(StreamInput in) throws IOException {
if (in.readBoolean()) {
nodeId = in.readText();
}
shardId = ShardId.readShardId(in);
index = new Text(shardId.getIndexName());
}
public SearchShardTarget(String nodeId, ShardId shardId) {
this.nodeId = nodeId == null ? null : new Text(nodeId);
this.index = new Text(shardId.getIndexName());
this.shardId = shardId;
} }
public SearchShardTarget(String nodeId, Index index, int shardId) { public SearchShardTarget(String nodeId, Index index, int shardId) {
this.nodeId = nodeId == null ? null : new Text(nodeId); this(nodeId, new ShardId(index, shardId));
this.index = new Text(index.getName());
this.shardId = shardId;
} }
@Nullable @Nullable
@ -73,36 +83,26 @@ public class SearchShardTarget implements Streamable, Comparable<SearchShardTarg
return this.index; return this.index;
} }
public int shardId() { public ShardId shardId() {
return shardId; return shardId;
} }
public int getShardId() { public ShardId getShardId() {
return shardId; return shardId;
} }
public static SearchShardTarget readSearchShardTarget(StreamInput in) throws IOException {
SearchShardTarget result = new SearchShardTarget();
result.readFrom(in);
return result;
}
@Override @Override
public int compareTo(SearchShardTarget o) { public int compareTo(SearchShardTarget o) {
int i = index.string().compareTo(o.index()); int i = index.string().compareTo(o.index());
if (i == 0) { if (i == 0) {
i = shardId - o.shardId; i = shardId.getId() - o.shardId.id();
} }
return i; return i;
} }
@Override @Override
public void readFrom(StreamInput in) throws IOException { public SearchShardTarget readFrom(StreamInput in) throws IOException {
if (in.readBoolean()) { return new SearchShardTarget(in);
nodeId = in.readText();
}
index = in.readText();
shardId = in.readVInt();
} }
@Override @Override
@ -113,19 +113,15 @@ public class SearchShardTarget implements Streamable, Comparable<SearchShardTarg
out.writeBoolean(true); out.writeBoolean(true);
out.writeText(nodeId); out.writeText(nodeId);
} }
out.writeText(index); shardId.writeTo(out);
out.writeVInt(shardId);
} }
@Override @Override
public boolean equals(Object o) { public boolean equals(Object o) {
if (this == o) return true; if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false; if (o == null || getClass() != o.getClass()) return false;
SearchShardTarget that = (SearchShardTarget) o; SearchShardTarget that = (SearchShardTarget) o;
if (shardId.equals(that.shardId) == false) return false;
if (shardId != that.shardId) return false;
if (index != null ? !index.equals(that.index) : that.index != null) return false;
if (nodeId != null ? !nodeId.equals(that.nodeId) : that.nodeId != null) return false; if (nodeId != null ? !nodeId.equals(that.nodeId) : that.nodeId != null) return false;
return true; return true;
@ -135,7 +131,7 @@ public class SearchShardTarget implements Streamable, Comparable<SearchShardTarg
public int hashCode() { public int hashCode() {
int result = nodeId != null ? nodeId.hashCode() : 0; int result = nodeId != null ? nodeId.hashCode() : 0;
result = 31 * result + (index != null ? index.hashCode() : 0); result = 31 * result + (index != null ? index.hashCode() : 0);
result = 31 * result + shardId; result = 31 * result + shardId.hashCode();
return result; return result;
} }
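Since SearchShardTarget now carries a full ShardId, ordering and equality delegate to it. A hedged sketch of the observable behavior (the Index(name, uuid) constructor is an assumption from this branch's Index refactor):

    // Hedged sketch: two targets for the same shard on different nodes.
    Index logs = new Index("logs", "A1b2C3d4");                      // assumed (name, uuid) ctor
    SearchShardTarget a = new SearchShardTarget("node-1", new ShardId(logs, 0));
    SearchShardTarget b = new SearchShardTarget("node-2", logs, 0);  // delegates to the ShardId ctor above
    assert a.compareTo(b) == 0; // compares index name, then shard id
    assert !a.equals(b);        // equals also checks nodeId, which differs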
@ -76,7 +76,7 @@ public class SearchPhaseController extends AbstractComponent {
public int compare(AtomicArray.Entry<? extends QuerySearchResultProvider> o1, AtomicArray.Entry<? extends QuerySearchResultProvider> o2) { public int compare(AtomicArray.Entry<? extends QuerySearchResultProvider> o1, AtomicArray.Entry<? extends QuerySearchResultProvider> o2) {
int i = o1.value.shardTarget().index().compareTo(o2.value.shardTarget().index()); int i = o1.value.shardTarget().index().compareTo(o2.value.shardTarget().index());
if (i == 0) { if (i == 0) {
i = o1.value.shardTarget().shardId() - o2.value.shardTarget().shardId(); i = o1.value.shardTarget().shardId().id() - o2.value.shardTarget().shardId().id();
} }
return i; return i;
} }
@ -26,7 +26,6 @@ import org.elasticsearch.transport.TransportResponse;
import java.io.IOException; import java.io.IOException;
import static org.elasticsearch.search.SearchShardTarget.readSearchShardTarget;
import static org.elasticsearch.search.fetch.QueryFetchSearchResult.readQueryFetchSearchResult; import static org.elasticsearch.search.fetch.QueryFetchSearchResult.readQueryFetchSearchResult;
/** /**
@ -56,7 +55,7 @@ public class ScrollQueryFetchSearchResult extends TransportResponse {
@Override @Override
public void readFrom(StreamInput in) throws IOException { public void readFrom(StreamInput in) throws IOException {
super.readFrom(in); super.readFrom(in);
shardTarget = readSearchShardTarget(in); shardTarget = new SearchShardTarget(in);
result = readQueryFetchSearchResult(in); result = readQueryFetchSearchResult(in);
result.shardTarget(shardTarget); result.shardTarget(shardTarget);

@ -55,7 +55,6 @@ import static java.util.Collections.singletonMap;
import static java.util.Collections.unmodifiableMap; import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.common.lucene.Lucene.readExplanation; import static org.elasticsearch.common.lucene.Lucene.readExplanation;
import static org.elasticsearch.common.lucene.Lucene.writeExplanation; import static org.elasticsearch.common.lucene.Lucene.writeExplanation;
import static org.elasticsearch.search.SearchShardTarget.readSearchShardTarget;
import static org.elasticsearch.search.highlight.HighlightField.readHighlightField; import static org.elasticsearch.search.highlight.HighlightField.readHighlightField;
import static org.elasticsearch.search.internal.InternalSearchHitField.readSearchHitField; import static org.elasticsearch.search.internal.InternalSearchHitField.readSearchHitField;
@ -638,7 +637,7 @@ public class InternalSearchHit implements SearchHit {
if (context.streamShardTarget() == ShardTargetType.STREAM) { if (context.streamShardTarget() == ShardTargetType.STREAM) {
if (in.readBoolean()) { if (in.readBoolean()) {
shard = readSearchShardTarget(in); shard = new SearchShardTarget(in);
} }
} else if (context.streamShardTarget() == ShardTargetType.LOOKUP) { } else if (context.streamShardTarget() == ShardTargetType.LOOKUP) {
int lookupId = in.readVInt(); int lookupId = in.readVInt();
@ -34,7 +34,6 @@ import java.util.IdentityHashMap;
import java.util.Iterator; import java.util.Iterator;
import java.util.Map; import java.util.Map;
import static org.elasticsearch.search.SearchShardTarget.readSearchShardTarget;
import static org.elasticsearch.search.internal.InternalSearchHit.readSearchHit; import static org.elasticsearch.search.internal.InternalSearchHit.readSearchHit;
/** /**
@ -216,7 +215,7 @@ public class InternalSearchHits implements SearchHits {
// read the lookup table first // read the lookup table first
int lookupSize = in.readVInt(); int lookupSize = in.readVInt();
for (int i = 0; i < lookupSize; i++) { for (int i = 0; i < lookupSize; i++) {
context.handleShardLookup().put(in.readVInt(), readSearchShardTarget(in)); context.handleShardLookup().put(in.readVInt(), new SearchShardTarget(in));
} }
} }
@ -262,4 +261,4 @@ public class InternalSearchHits implements SearchHits {
} }
} }
} }
} }
@@ -58,8 +58,7 @@ import static org.elasticsearch.search.Scroll.readScroll;
 public class ShardSearchLocalRequest implements ShardSearchRequest {
-    private String index;
-    private int shardId;
+    private ShardId shardId;
     private int numberOfShards;
     private SearchType searchType;
     private Scroll scroll;
@@ -97,8 +96,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest {
     public ShardSearchLocalRequest(ShardId shardId, int numberOfShards, SearchType searchType, SearchSourceBuilder source, String[] types,
             Boolean requestCache) {
-        this.index = shardId.getIndexName();
-        this.shardId = shardId.id();
+        this.shardId = shardId;
         this.numberOfShards = numberOfShards;
         this.searchType = searchType;
         this.source = source;
@@ -106,13 +104,9 @@ public class ShardSearchLocalRequest implements ShardSearchRequest {
         this.requestCache = requestCache;
     }
-    @Override
-    public String index() {
-        return index;
-    }
-
     @Override
-    public int shardId() {
+    public ShardId shardId() {
         return shardId;
     }
@@ -177,8 +171,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest {
     @SuppressWarnings("unchecked")
     protected void innerReadFrom(StreamInput in) throws IOException {
-        index = in.readString();
-        shardId = in.readVInt();
+        shardId = ShardId.readShardId(in);
         searchType = SearchType.fromId(in.readByte());
         numberOfShards = in.readVInt();
         if (in.readBoolean()) {
@@ -195,8 +188,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest {
     }
     protected void innerWriteTo(StreamOutput out, boolean asKey) throws IOException {
-        out.writeString(index);
-        out.writeVInt(shardId);
+        shardId.writeTo(out);
         out.writeByte(searchType.id());
         if (!asKey) {
             out.writeVInt(numberOfShards);
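
The request now carries one ShardId value instead of a separate index string plus shard integer, and serializes it as a unit. A rough sketch of the idea with assumed stand-in stream interfaces (the real ShardId also carries an index UUID, visible later in this diff as the "_na_" argument in tests):

import java.io.IOException;

// Assumed, simplified stream interfaces for this sketch.
interface In { String readString() throws IOException; int readVInt() throws IOException; }
interface Out { void writeString(String s) throws IOException; void writeVInt(int i) throws IOException; }

// One value object owns the index name and shard number, so every request
// serializes it in one call instead of writing a String and a VInt by hand.
final class ShardIdSketch {
    private final String indexName;
    private final int id;

    ShardIdSketch(String indexName, int id) {
        this.indexName = indexName;
        this.id = id;
    }

    static ShardIdSketch readFrom(In in) throws IOException {
        return new ShardIdSketch(in.readString(), in.readVInt());
    }

    void writeTo(Out out) throws IOException {
        out.writeString(indexName);
        out.writeVInt(id);
    }
}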

View File

@@ -21,6 +21,7 @@ package org.elasticsearch.search.internal;
 import org.elasticsearch.action.search.SearchType;
 import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.script.Template;
 import org.elasticsearch.search.Scroll;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
@@ -34,9 +35,7 @@ import java.io.IOException;
  */
 public interface ShardSearchRequest {
-    String index();
-
-    int shardId();
+    ShardId shardId();
     String[] types();

View File

@@ -28,6 +28,7 @@ import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.script.Template;
 import org.elasticsearch.search.Scroll;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
@@ -71,13 +72,9 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha
         return originalIndices.indicesOptions();
     }
-    @Override
-    public String index() {
-        return shardSearchLocalRequest.index();
-    }
-
     @Override
-    public int shardId() {
+    public ShardId shardId() {
         return shardSearchLocalRequest.shardId();
     }

View File

@@ -26,7 +26,6 @@ import org.elasticsearch.transport.TransportResponse;
 import java.io.IOException;
-import static org.elasticsearch.search.SearchShardTarget.readSearchShardTarget;
 import static org.elasticsearch.search.query.QuerySearchResult.readQuerySearchResult;
 /**
@@ -56,7 +55,7 @@ public class ScrollQuerySearchResult extends TransportResponse {
     @Override
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
-        shardTarget = readSearchShardTarget(in);
+        shardTarget = new SearchShardTarget(in);
         queryResult = readQuerySearchResult(in);
         queryResult.shardTarget(shardTarget);
     }

View File

@@ -27,12 +27,10 @@ import java.io.IOException;
 /**
  * A sort builder to sort based on a document field.
  */
-public class FieldSortBuilder extends SortBuilder {
+public class FieldSortBuilder extends SortBuilder<FieldSortBuilder> {
     private final String fieldName;
-    private SortOrder order;
-
     private Object missing;
     private String unmappedType;
@@ -55,20 +53,10 @@ public class FieldSortBuilder extends SortBuilder {
         this.fieldName = fieldName;
     }
-    /**
-     * The order of sorting. Defaults to {@link SortOrder#ASC}.
-     */
-    @Override
-    public FieldSortBuilder order(SortOrder order) {
-        this.order = order;
-        return this;
-    }
-
     /**
      * Sets the value when a field is missing in a doc. Can also be set to <tt>_last</tt> or
      * <tt>_first</tt> to sort missing last or first respectively.
      */
-    @Override
     public FieldSortBuilder missing(Object missing) {
         this.missing = missing;
         return this;
@@ -119,9 +107,7 @@ public class FieldSortBuilder extends SortBuilder {
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject(fieldName);
-        if (order != null) {
-            builder.field("order", order.toString());
-        }
+        builder.field(ORDER_FIELD.getPreferredName(), order);
         if (missing != null) {
             builder.field("missing", missing);
         }

View File

@@ -44,7 +44,7 @@ import java.util.Objects;
 /**
  * A geo distance based sorting on a geo point like field.
  */
-public class GeoDistanceSortBuilder extends SortBuilder
+public class GeoDistanceSortBuilder extends SortBuilder<GeoDistanceSortBuilder>
         implements ToXContent, NamedWriteable<GeoDistanceSortBuilder>, SortElementParserTemp<GeoDistanceSortBuilder> {
     public static final String NAME = "_geo_distance";
     public static final boolean DEFAULT_COERCE = false;
@@ -57,14 +57,13 @@ public class GeoDistanceSortBuilder extends SortBuilder
     private GeoDistance geoDistance = GeoDistance.DEFAULT;
     private DistanceUnit unit = DistanceUnit.DEFAULT;
-    private SortOrder order = SortOrder.ASC;
     // TODO there is an enum that covers that parameter which we should be using here
     private String sortMode = null;
     @SuppressWarnings("rawtypes")
     private QueryBuilder nestedFilter;
     private String nestedPath;
     // TODO switch to GeoValidationMethod enum
     private boolean coerce = DEFAULT_COERCE;
     private boolean ignoreMalformed = DEFAULT_IGNORE_MALFORMED;
@@ -109,7 +108,7 @@ public class GeoDistanceSortBuilder extends SortBuilder
         }
         this.fieldName = fieldName;
     }
     /**
      * Copy constructor.
      * */
@@ -125,7 +124,7 @@ public class GeoDistanceSortBuilder extends SortBuilder
         this.coerce = original.coerce;
         this.ignoreMalformed = original.ignoreMalformed;
     }
     /**
      * Returns the geo point like field the distance based sort operates on.
      * */
@@ -153,7 +152,7 @@ public class GeoDistanceSortBuilder extends SortBuilder
         this.points.addAll(Arrays.asList(points));
         return this;
     }
     /**
      * Returns the points to create the range distance facets from.
      */
@@ -163,7 +162,7 @@ public class GeoDistanceSortBuilder extends SortBuilder
     /**
      * The geohash of the geo point to create the range distance facets from.
      *
      * Deprecated - please use points(GeoPoint... points) instead.
      */
     @Deprecated
@@ -173,7 +172,7 @@ public class GeoDistanceSortBuilder extends SortBuilder
         }
         return this;
     }
     /**
      * The geo distance type used to compute the distance.
      */
@@ -181,7 +180,7 @@ public class GeoDistanceSortBuilder extends SortBuilder
         this.geoDistance = geoDistance;
         return this;
     }
     /**
      * Returns the geo distance type used to compute the distance.
      */
@@ -204,30 +203,6 @@ public class GeoDistanceSortBuilder extends SortBuilder
         return this.unit;
     }
-    /**
-     * The order of sorting. Defaults to {@link SortOrder#ASC}.
-     */
-    @Override
-    public GeoDistanceSortBuilder order(SortOrder order) {
-        this.order = order;
-        return this;
-    }
-
-    /** Returns the order of sorting. */
-    public SortOrder order() {
-        return this.order;
-    }
-
-    /**
-     * Not relevant.
-     *
-     * TODO should this throw an exception rather than silently ignore a parameter that is not used?
-     */
-    @Override
-    public GeoDistanceSortBuilder missing(Object missing) {
-        return this;
-    }
     /**
      * Defines which distance to use for sorting in the case a document contains multiple geo points.
      * Possible values: min and max
@@ -250,16 +225,16 @@ public class GeoDistanceSortBuilder extends SortBuilder
      * Sets the nested filter that the nested objects should match with in order to be taken into account
      * for sorting.
      */
-    public GeoDistanceSortBuilder setNestedFilter(QueryBuilder nestedFilter) {
+    public GeoDistanceSortBuilder setNestedFilter(QueryBuilder<?> nestedFilter) {
         this.nestedFilter = nestedFilter;
         return this;
     }
     /**
      * Returns the nested filter that the nested objects should match with in order to be taken into account
      * for sorting.
      **/
-    public QueryBuilder getNestedFilter() {
+    public QueryBuilder<?> getNestedFilter() {
         return this.nestedFilter;
     }
@@ -271,7 +246,7 @@ public class GeoDistanceSortBuilder extends SortBuilder
         this.nestedPath = nestedPath;
         return this;
     }
     /**
      * Returns the nested path if sorting occurs on a field that is inside a nested object. By default when sorting on a
      * field inside a nested object, the nearest upper nested object is selected as nested path.
@@ -295,7 +270,7 @@ public class GeoDistanceSortBuilder extends SortBuilder
         }
         return this;
     }
     public boolean ignoreMalformed() {
         return this.ignoreMalformed;
     }
@@ -312,11 +287,7 @@ public class GeoDistanceSortBuilder extends SortBuilder
         builder.field("unit", unit);
         builder.field("distance_type", geoDistance.name().toLowerCase(Locale.ROOT));
-        if (order == SortOrder.DESC) {
-            builder.field("reverse", true);
-        } else {
-            builder.field("reverse", false);
-        }
+        builder.field(ORDER_FIELD.getPreferredName(), order);
         if (sortMode != null) {
             builder.field("mode", sortMode);
@@ -373,7 +344,7 @@ public class GeoDistanceSortBuilder extends SortBuilder
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(fieldName);
         out.writeGenericValue(points);
         geoDistance.writeTo(out);
         unit.writeTo(out);
         order.writeTo(out);
@@ -392,10 +363,10 @@ public class GeoDistanceSortBuilder extends SortBuilder
     @Override
     public GeoDistanceSortBuilder readFrom(StreamInput in) throws IOException {
         String fieldName = in.readString();
         ArrayList<GeoPoint> points = (ArrayList<GeoPoint>) in.readGenericValue();
         GeoDistanceSortBuilder result = new GeoDistanceSortBuilder(fieldName, points.toArray(new GeoPoint[points.size()]));
         result.geoDistance(GeoDistance.readGeoDistanceFrom(in));
         result.unit(DistanceUnit.readDistanceUnit(in));
         result.order(SortOrder.readOrderFrom(in));
@@ -419,9 +390,9 @@ public class GeoDistanceSortBuilder extends SortBuilder
         List<GeoPoint> geoPoints = new ArrayList<>();
         DistanceUnit unit = DistanceUnit.DEFAULT;
         GeoDistance geoDistance = GeoDistance.DEFAULT;
-        boolean reverse = false;
+        SortOrder order = SortOrder.ASC;
         MultiValueMode sortMode = null;
-        QueryBuilder nestedFilter = null;
+        QueryBuilder<?> nestedFilter = null;
         String nestedPath = null;
         boolean coerce = GeoDistanceSortBuilder.DEFAULT_COERCE;
@@ -439,8 +410,8 @@ public class GeoDistanceSortBuilder extends SortBuilder
             } else if (token == XContentParser.Token.START_OBJECT) {
                 // the json in the format of -> field : { lat : 30, lon : 12 }
                 if ("nested_filter".equals(currentName) || "nestedFilter".equals(currentName)) {
                     // TODO Note to remember: while this is kept as a QueryBuilder internally,
                     // we need to make sure to call toFilter() on it once on the shard
                     // (e.g. in the new build() method)
                     nestedFilter = context.parseInnerQueryBuilder();
                 } else {
@@ -451,9 +422,9 @@ public class GeoDistanceSortBuilder extends SortBuilder
                 }
             } else if (token.isValue()) {
                 if ("reverse".equals(currentName)) {
-                    reverse = parser.booleanValue();
+                    order = parser.booleanValue() ? SortOrder.DESC : SortOrder.ASC;
                 } else if ("order".equals(currentName)) {
-                    reverse = "desc".equals(parser.text());
+                    order = SortOrder.fromString(parser.text());
                 } else if ("unit".equals(currentName)) {
                     unit = DistanceUnit.fromString(parser.text());
                 } else if ("distance_type".equals(currentName) || "distanceType".equals(currentName)) {
@@ -484,11 +455,7 @@ public class GeoDistanceSortBuilder extends SortBuilder
         GeoDistanceSortBuilder result = new GeoDistanceSortBuilder(fieldName, geoPoints.toArray(new GeoPoint[geoPoints.size()]));
         result.geoDistance(geoDistance);
         result.unit(unit);
-        if (reverse) {
-            result.order(SortOrder.DESC);
-        } else {
-            result.order(SortOrder.ASC);
-        }
+        result.order(order);
         if (sortMode != null) {
             result.sortMode(sortMode.name());
         }
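
The parser change above folds the legacy boolean "reverse" flag and the string "order" field into a single SortOrder local with ASC as the default. A minimal self-contained sketch of that mapping, assuming a SortOrder enum shaped like the one these builders use (ASC/DESC plus a fromString helper):

enum SortOrder {
    ASC, DESC;

    static SortOrder fromString(String op) {
        switch (op.toLowerCase(java.util.Locale.ROOT)) {
            case "asc":  return ASC;
            case "desc": return DESC;
            default: throw new IllegalArgumentException("unknown sort order [" + op + "]");
        }
    }
}

class SortOrderParsingSketch {
    // Before: a "boolean reverse" plus ad-hoc string comparison tracked the same
    // fact twice. After: both spellings collapse into one SortOrder value.
    static SortOrder parse(String fieldName, String value) {
        SortOrder order = SortOrder.ASC;                           // default
        if ("reverse".equals(fieldName)) {
            order = Boolean.parseBoolean(value) ? SortOrder.DESC : SortOrder.ASC;
        } else if ("order".equals(fieldName)) {
            order = SortOrder.fromString(value);
        }
        return order;
    }
}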

View File

@@ -43,9 +43,9 @@ import org.elasticsearch.index.fielddata.MultiGeoPointValues;
 import org.elasticsearch.index.fielddata.NumericDoubleValues;
 import org.elasticsearch.index.fielddata.SortedNumericDoubleValues;
 import org.elasticsearch.index.mapper.MappedFieldType;
+import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.index.query.support.NestedInnerQueryParseSupport;
 import org.elasticsearch.search.MultiValueMode;
-import org.elasticsearch.search.internal.SearchContext;
 import java.io.IOException;
 import java.util.ArrayList;
@@ -62,7 +62,7 @@ public class GeoDistanceSortParser implements SortParser {
     }
     @Override
-    public SortField parse(XContentParser parser, SearchContext context) throws Exception {
+    public SortField parse(XContentParser parser, QueryShardContext context) throws Exception {
         String fieldName = null;
         List<GeoPoint> geoPoints = new ArrayList<>();
         DistanceUnit unit = DistanceUnit.DEFAULT;
@@ -71,7 +71,7 @@ public class GeoDistanceSortParser implements SortParser {
         MultiValueMode sortMode = null;
         NestedInnerQueryParseSupport nestedHelper = null;
-        final boolean indexCreatedBeforeV2_0 = context.indexShard().indexSettings().getIndexVersionCreated().before(Version.V_2_0_0);
+        final boolean indexCreatedBeforeV2_0 = context.indexVersionCreated().before(Version.V_2_0_0);
         boolean coerce = GeoDistanceSortBuilder.DEFAULT_COERCE;
         boolean ignoreMalformed = GeoDistanceSortBuilder.DEFAULT_IGNORE_MALFORMED;
@@ -155,12 +155,12 @@ public class GeoDistanceSortParser implements SortParser {
             throw new IllegalArgumentException("sort_mode [sum] isn't supported for sorting by geo distance");
         }
-        MappedFieldType fieldType = context.smartNameFieldType(fieldName);
+        MappedFieldType fieldType = context.fieldMapper(fieldName);
         if (fieldType == null) {
             throw new IllegalArgumentException("failed to find mapper for [" + fieldName + "] for geo distance based sort");
         }
         final MultiValueMode finalSortMode = sortMode; // final reference for use in the anonymous class
-        final IndexGeoPointFieldData geoIndexFieldData = context.fieldData().getForField(fieldType);
+        final IndexGeoPointFieldData geoIndexFieldData = context.getForField(fieldType);
         final FixedSourceDistance[] distances = new FixedSourceDistance[geoPoints.size()];
         for (int i = 0; i< geoPoints.size(); i++) {
             distances[i] = geoDistance.fixedSourceDistance(geoPoints.get(i).lat(), geoPoints.get(i).lon(), unit);
@@ -168,15 +168,16 @@ public class GeoDistanceSortParser implements SortParser {
         final Nested nested;
         if (nestedHelper != null && nestedHelper.getPath() != null) {
-            BitSetProducer rootDocumentsFilter = context.bitsetFilterCache().getBitSetProducer(Queries.newNonNestedFilter());
-            Query innerDocumentsFilter;
+            BitSetProducer rootDocumentsFilter = context.bitsetFilter(Queries.newNonNestedFilter());
+            Query innerDocumentsQuery;
             if (nestedHelper.filterFound()) {
                 // TODO: use queries instead
-                innerDocumentsFilter = nestedHelper.getInnerFilter();
+                innerDocumentsQuery = nestedHelper.getInnerFilter();
             } else {
-                innerDocumentsFilter = nestedHelper.getNestedObjectMapper().nestedTypeFilter();
+                innerDocumentsQuery = nestedHelper.getNestedObjectMapper().nestedTypeFilter();
             }
-            nested = new Nested(rootDocumentsFilter, context.searcher().createNormalizedWeight(innerDocumentsFilter, false));
+
+            nested = new Nested(rootDocumentsFilter, innerDocumentsQuery);
         } else {
             nested = null;
         }

View File

@@ -19,40 +19,103 @@
 package org.elasticsearch.search.sort;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.ParseFieldMatcher;
+import org.elasticsearch.common.ParsingException;
+import org.elasticsearch.common.io.stream.NamedWriteable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.query.QueryParseContext;
 import java.io.IOException;
+import java.util.Objects;
 /**
  * A sort builder allowing to sort by score.
- *
- *
  */
-public class ScoreSortBuilder extends SortBuilder {
+public class ScoreSortBuilder extends SortBuilder<ScoreSortBuilder> implements NamedWriteable<ScoreSortBuilder>,
+        SortElementParserTemp<ScoreSortBuilder> {
-    private SortOrder order;
+    private static final String NAME = "_score";
+    static final ScoreSortBuilder PROTOTYPE = new ScoreSortBuilder();
+    public static final ParseField REVERSE_FIELD = new ParseField("reverse");
+    public static final ParseField ORDER_FIELD = new ParseField("order");
-    /**
-     * The order of sort scoring. By default, its {@link SortOrder#DESC}.
-     */
-    @Override
-    public ScoreSortBuilder order(SortOrder order) {
-        this.order = order;
-        return this;
+    public ScoreSortBuilder() {
+        // order defaults to desc when sorting on the _score
+        order(SortOrder.DESC);
     }
-    @Override
-    public SortBuilder missing(Object missing) {
-        return this;
-    }
-
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        builder.startObject("_score");
-        if (order == SortOrder.ASC) {
-            builder.field("reverse", true);
-        }
+        builder.startObject(NAME);
+        builder.field(ORDER_FIELD.getPreferredName(), order);
         builder.endObject();
         return builder;
     }
+
+    @Override
+    public ScoreSortBuilder fromXContent(QueryParseContext context, String elementName) throws IOException {
+        XContentParser parser = context.parser();
+        ParseFieldMatcher matcher = context.parseFieldMatcher();
+
+        XContentParser.Token token;
+        String currentName = parser.currentName();
+        ScoreSortBuilder result = new ScoreSortBuilder();
+        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+            if (token == XContentParser.Token.FIELD_NAME) {
+                currentName = parser.currentName();
+            } else if (token.isValue()) {
+                if (matcher.match(currentName, REVERSE_FIELD)) {
+                    if (parser.booleanValue()) {
+                        result.order(SortOrder.ASC);
+                    }
+                    // else we keep the default DESC
+                } else if (matcher.match(currentName, ORDER_FIELD)) {
+                    result.order(SortOrder.fromString(parser.text()));
+                } else {
+                    throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] failed to parse field [" + currentName + "]");
+                }
+            } else {
+                throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] unexpected token [" + token + "]");
+            }
+        }
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object object) {
+        if (this == object) {
+            return true;
+        }
+        if (object == null || getClass() != object.getClass()) {
+            return false;
+        }
+        ScoreSortBuilder other = (ScoreSortBuilder) object;
+        return Objects.equals(order, other.order);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(this.order);
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        order.writeTo(out);
+    }
+
+    @Override
+    public ScoreSortBuilder readFrom(StreamInput in) throws IOException {
+        ScoreSortBuilder builder = new ScoreSortBuilder().order(SortOrder.readOrderFrom(in));
+        return builder;
+    }
+
+    @Override
+    public String getWriteableName() {
+        return NAME;
+    }
 }
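
The PROTOTYPE constant, readFrom, and getWriteableName above follow a prototype-style registry pattern: one immutable prototype per writeable name is registered, and deserialization is delegated to it. A self-contained sketch of the mechanism (illustrative names, not the actual Elasticsearch registry API):

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

// Assumed, reduced stream interface for this sketch.
interface StreamIn { String readString() throws IOException; }

interface NamedWriteableSketch<T> {
    String getWriteableName();                  // key the prototype is stored under
    T readFrom(StreamIn in) throws IOException; // builds a fresh instance from the wire
}

final class PrototypeRegistry {
    private final Map<String, NamedWriteableSketch<?>> prototypes = new HashMap<>();

    void register(NamedWriteableSketch<?> prototype) {
        prototypes.put(prototype.getWriteableName(), prototype);
    }

    // The wire format writes the name first, then the payload; reading reverses it.
    Object read(StreamIn in) throws IOException {
        String name = in.readString();
        NamedWriteableSketch<?> prototype = prototypes.get(name);
        if (prototype == null) {
            throw new IllegalStateException("no prototype registered for [" + name + "]");
        }
        return prototype.readFrom(in);
    }
}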

View File

@@ -28,14 +28,12 @@ import java.io.IOException;
 /**
  * Script sort builder allows to sort based on a custom script expression.
  */
-public class ScriptSortBuilder extends SortBuilder {
+public class ScriptSortBuilder extends SortBuilder<ScriptSortBuilder> {
     private Script script;
     private final String type;
-    private SortOrder order;
-
     private String sortMode;
     private QueryBuilder nestedFilter;
@@ -53,23 +51,6 @@ public class ScriptSortBuilder extends SortBuilder {
         this.type = type;
     }
-    /**
-     * Sets the sort order.
-     */
-    @Override
-    public ScriptSortBuilder order(SortOrder order) {
-        this.order = order;
-        return this;
-    }
-
-    /**
-     * Not really relevant.
-     */
-    @Override
-    public SortBuilder missing(Object missing) {
-        return this;
-    }
-
     /**
      * Defines which distance to use for sorting in the case a document contains multiple geo points.
      * Possible values: min and max
@@ -83,7 +64,7 @@ public class ScriptSortBuilder extends SortBuilder {
      * Sets the nested filter that the nested objects should match with in order to be taken into account
      * for sorting.
      */
-    public ScriptSortBuilder setNestedFilter(QueryBuilder nestedFilter) {
+    public ScriptSortBuilder setNestedFilter(QueryBuilder<?> nestedFilter) {
         this.nestedFilter = nestedFilter;
         return this;
     }
@@ -102,9 +83,7 @@ public class ScriptSortBuilder extends SortBuilder {
         builder.startObject("_script");
         builder.field("script", script);
         builder.field("type", type);
-        if (order == SortOrder.DESC) {
-            builder.field("reverse", true);
-        }
+        builder.field(ORDER_FIELD.getPreferredName(), order);
         if (sortMode != null) {
             builder.field("mode", sortMode);
         }

View File

@@ -27,6 +27,7 @@ import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.join.BitSetProducer;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
+import org.elasticsearch.common.ParsingException;
 import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.fielddata.FieldData;
@@ -37,6 +38,7 @@ import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
 import org.elasticsearch.index.fielddata.SortedNumericDoubleValues;
 import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource;
 import org.elasticsearch.index.fielddata.fieldcomparator.DoubleValuesComparatorSource;
+import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.index.query.support.NestedInnerQueryParseSupport;
 import org.elasticsearch.script.LeafSearchScript;
 import org.elasticsearch.script.Script;
@@ -68,7 +70,7 @@ public class ScriptSortParser implements SortParser {
     }
     @Override
-    public SortField parse(XContentParser parser, SearchContext context) throws Exception {
+    public SortField parse(XContentParser parser, QueryShardContext context) throws Exception {
         ScriptParameterParser scriptParameterParser = new ScriptParameterParser();
         Script script = null;
         String type = null;
@@ -122,19 +124,20 @@ public class ScriptSortParser implements SortParser {
                 script = new Script(scriptValue.script(), scriptValue.scriptType(), scriptParameterParser.lang(), params);
             }
         } else if (params != null) {
-            throw new SearchParseException(context, "script params must be specified inside script object", parser.getTokenLocation());
+            throw new ParsingException(parser.getTokenLocation(), "script params must be specified inside script object");
         }
         if (script == null) {
-            throw new SearchParseException(context, "_script sorting requires setting the script to sort by", parser.getTokenLocation());
+            throw new ParsingException(parser.getTokenLocation(), "_script sorting requires setting the script to sort by");
         }
         if (type == null) {
-            throw new SearchParseException(context, "_script sorting requires setting the type of the script", parser.getTokenLocation());
+            throw new ParsingException(parser.getTokenLocation(), "_script sorting requires setting the type of the script");
         }
-        final SearchScript searchScript = context.scriptService().search(context.lookup(), script, ScriptContext.Standard.SEARCH, Collections.emptyMap());
+        final SearchScript searchScript = context.getScriptService().search(
+                context.lookup(), script, ScriptContext.Standard.SEARCH, Collections.emptyMap());
         if (STRING_SORT_TYPE.equals(type) && (sortMode == MultiValueMode.SUM || sortMode == MultiValueMode.AVG)) {
-            throw new SearchParseException(context, "type [string] doesn't support mode [" + sortMode + "]", parser.getTokenLocation());
+            throw new ParsingException(parser.getTokenLocation(), "type [string] doesn't support mode [" + sortMode + "]");
         }
         if (sortMode == null) {
@@ -144,7 +147,7 @@ public class ScriptSortParser implements SortParser {
         // If nested_path is specified, then wrap the `fieldComparatorSource` in a `NestedFieldComparatorSource`
         final Nested nested;
         if (nestedHelper != null && nestedHelper.getPath() != null) {
-            BitSetProducer rootDocumentsFilter = context.bitsetFilterCache().getBitSetProducer(Queries.newNonNestedFilter());
+            BitSetProducer rootDocumentsFilter = context.bitsetFilter(Queries.newNonNestedFilter());
             Query innerDocumentsFilter;
             if (nestedHelper.filterFound()) {
                 // TODO: use queries instead
@@ -152,7 +155,7 @@ public class ScriptSortParser implements SortParser {
             } else {
                 innerDocumentsFilter = nestedHelper.getNestedObjectMapper().nestedTypeFilter();
             }
-            nested = new Nested(rootDocumentsFilter, context.searcher().createNormalizedWeight(innerDocumentsFilter, false));
+            nested = new Nested(rootDocumentsFilter, innerDocumentsFilter);
         } else {
             nested = null;
         }
@@ -205,7 +208,7 @@ public class ScriptSortParser implements SortParser {
             };
             break;
         default:
-            throw new SearchParseException(context, "custom script sort type [" + type + "] not supported", parser.getTokenLocation());
+            throw new ParsingException(parser.getTokenLocation(), "custom script sort type [" + type + "] not supported");
         }
         return new SortField("_script", fieldComparatorSource, reverse);

View File

@@ -20,14 +20,20 @@
 package org.elasticsearch.search.sort;
 import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
+import java.util.Objects;
 /**
  *
  */
-public abstract class SortBuilder implements ToXContent {
+public abstract class SortBuilder<T extends SortBuilder<?>> implements ToXContent {
+
+    protected SortOrder order = SortOrder.ASC;
+    public static final ParseField ORDER_FIELD = new ParseField("order");
     @Override
     public String toString() {
@@ -42,13 +48,19 @@ public abstract class SortBuilder implements ToXContent {
     }
     /**
-     * The order of sorting. Defaults to {@link SortOrder#ASC}.
+     * Set the order of sorting.
      */
-    public abstract SortBuilder order(SortOrder order);
+    @SuppressWarnings("unchecked")
+    public T order(SortOrder order) {
+        Objects.requireNonNull(order, "sort order cannot be null.");
+        this.order = order;
+        return (T) this;
+    }
     /**
-     * Sets the value when a field is missing in a doc. Can also be set to <tt>_last</tt> or
-     * <tt>_first</tt> to sort missing last or first respectively.
+     * Return the {@link SortOrder} used for this {@link SortBuilder}.
      */
-    public abstract SortBuilder missing(Object missing);
+    public SortOrder order() {
+        return this.order;
+    }
 }
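
The SortBuilder<T extends SortBuilder<?>> signature is the self-referential generic ("curiously recurring") pattern: the base class implements order() once, and the cast to T lets every subclass keep a fluent API that returns its own concrete type instead of the base. A minimal sketch with illustrative class names:

abstract class BuilderBase<T extends BuilderBase<T>> {
    protected String order = "asc";

    @SuppressWarnings("unchecked")
    public T order(String order) {   // shared setter lives in the base class
        this.order = order;
        return (T) this;             // safe as long as subclasses bind T to themselves
    }
}

final class FieldBuilderSketch extends BuilderBase<FieldBuilderSketch> {
    private Object missing;

    public FieldBuilderSketch missing(Object missing) {  // subclass-specific setter
        this.missing = missing;
        return this;
    }
}

class CrtpDemo {
    public static void main(String[] args) {
        // Chaining works in either direction because order(...) already returns
        // FieldBuilderSketch; without the generic this line would not compile.
        FieldBuilderSketch b = new FieldBuilderSketch().order("desc").missing("_last");
    }
}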

View File

@@ -19,13 +19,12 @@
 package org.elasticsearch.search.sort;
-import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.index.query.QueryParseContext;
 import java.io.IOException;
 // TODO once sort refactoring is done this needs to be merged into SortBuilder
-public interface SortElementParserTemp<T extends ToXContent> {
+public interface SortElementParserTemp<T extends SortBuilder> {
     /**
      * Creates a new SortBuilder from the json held by the {@link SortElementParserTemp}
      * in {@link org.elasticsearch.common.xcontent.XContent} format

View File

@@ -138,7 +138,7 @@ public class SortParseElement implements SearchParseElement {
                 addSortField(context, sortFields, fieldName, reverse, unmappedType, missing, sortMode, nestedFilterParseHelper);
             } else {
                 if (PARSERS.containsKey(fieldName)) {
-                    sortFields.add(PARSERS.get(fieldName).parse(parser, context));
+                    sortFields.add(PARSERS.get(fieldName).parse(parser, context.getQueryShardContext()));
                 } else {
                     while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                         if (token == XContentParser.Token.FIELD_NAME) {
@@ -160,7 +160,7 @@ public class SortParseElement implements SearchParseElement {
                             sortMode = MultiValueMode.fromString(parser.text());
                         } else if ("nested_path".equals(innerJsonName) || "nestedPath".equals(innerJsonName)) {
                             if (nestedFilterParseHelper == null) {
-                                nestedFilterParseHelper = new NestedInnerQueryParseSupport(parser, context);
+                                nestedFilterParseHelper = new NestedInnerQueryParseSupport(parser, context.getQueryShardContext());
                             }
                             nestedFilterParseHelper.setPath(parser.text());
                         } else {
@@ -169,7 +169,7 @@ public class SortParseElement implements SearchParseElement {
                     } else if (token == XContentParser.Token.START_OBJECT) {
                         if ("nested_filter".equals(innerJsonName) || "nestedFilter".equals(innerJsonName)) {
                             if (nestedFilterParseHelper == null) {
-                                nestedFilterParseHelper = new NestedInnerQueryParseSupport(parser, context);
+                                nestedFilterParseHelper = new NestedInnerQueryParseSupport(parser, context.getQueryShardContext());
                             }
                             nestedFilterParseHelper.filter();
                         } else {
@@ -231,14 +231,13 @@ public class SortParseElement implements SearchParseElement {
         final Nested nested;
         if (nestedHelper != null && nestedHelper.getPath() != null) {
             BitSetProducer rootDocumentsFilter = context.bitsetFilterCache().getBitSetProducer(Queries.newNonNestedFilter());
-            Query innerDocumentsFilter;
+            Query innerDocumentsQuery;
             if (nestedHelper.filterFound()) {
-                // TODO: use queries instead
-                innerDocumentsFilter = nestedHelper.getInnerFilter();
+                innerDocumentsQuery = nestedHelper.getInnerFilter();
             } else {
-                innerDocumentsFilter = nestedHelper.getNestedObjectMapper().nestedTypeFilter();
+                innerDocumentsQuery = nestedHelper.getNestedObjectMapper().nestedTypeFilter();
             }
-            nested = new Nested(rootDocumentsFilter, context.searcher().createNormalizedWeight(innerDocumentsFilter, false));
+            nested = new Nested(rootDocumentsFilter, innerDocumentsQuery);
         } else {
             nested = null;
         }

View File

@@ -21,7 +21,7 @@ package org.elasticsearch.search.sort;
 import org.apache.lucene.search.SortField;
 import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.index.query.QueryShardContext;
 /**
  *
@@ -30,5 +30,5 @@ public interface SortParser {
     String[] names();
-    SortField parse(XContentParser parser, SearchContext context) throws Exception;
+    SortField parse(XContentParser parser, QueryShardContext context) throws Exception;
 }

View File

@@ -23,6 +23,7 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.fielddata.IndexFieldDataService;
 import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.search.SearchParseElement;
 import org.elasticsearch.search.internal.SearchContext;
 import org.elasticsearch.search.suggest.SuggestionSearchContext.SuggestionContext;
@@ -45,12 +46,12 @@ public final class SuggestParseElement implements SearchParseElement {
     @Override
     public void parse(XContentParser parser, SearchContext context) throws Exception {
         SuggestionSearchContext suggestionSearchContext = parseInternal(parser, context.mapperService(), context.fieldData(),
-                context.shardTarget().index(), context.shardTarget().shardId());
+                context.shardTarget().shardId());
         context.suggest(suggestionSearchContext);
     }
-    public SuggestionSearchContext parseInternal(XContentParser parser, MapperService mapperService, IndexFieldDataService fieldDataService,
-            String index, int shardId) throws IOException {
+    public SuggestionSearchContext parseInternal(XContentParser parser, MapperService mapperService,
+            IndexFieldDataService fieldDataService, ShardId shardId) throws IOException {
         SuggestionSearchContext suggestionSearchContext = new SuggestionSearchContext();
         BytesRef globalText = null;
@@ -119,7 +120,6 @@ public final class SuggestParseElement implements SearchParseElement {
             SuggestionContext suggestionContext = entry.getValue();
             suggestionContext.setShard(shardId);
-            suggestionContext.setIndex(index);
             SuggestUtils.verifySuggestion(mapperService, globalText, suggestionContext);
             suggestionSearchContext.addSuggestion(suggestionName, suggestionContext);
         }

View File

@@ -20,6 +20,7 @@ package org.elasticsearch.search.suggest;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.index.shard.ShardId;
 import java.util.LinkedHashMap;
 import java.util.Map;
@@ -36,9 +37,9 @@ public class SuggestionSearchContext {
     public Map<String, SuggestionContext> suggestions() {
         return suggestions;
     }
     public static class SuggestionContext {
         private BytesRef text;
         private BytesRef prefix;
         private BytesRef regex;
@@ -47,9 +48,9 @@ public class SuggestionSearchContext {
         private Analyzer analyzer;
         private int size = 5;
         private int shardSize = -1;
-        private int shardId;
-        private String index;
+        private ShardId shardId;
         public BytesRef getText() {
             return text;
         }
@@ -119,20 +119,12 @@ public class SuggestionSearchContext {
             }
             this.shardSize = shardSize;
         }
-        public void setShard(int shardId) {
+        public void setShard(ShardId shardId) {
             this.shardId = shardId;
         }
-        public void setIndex(String index) {
-            this.index = index;
-        }
-
-        public String getIndex() {
-            return index;
-        }
-
-        public int getShard() {
+        public ShardId getShard() {
             return shardId;
         }
     }

View File

@@ -117,7 +117,7 @@ public final class PhraseSuggester extends Suggester<PhraseSuggestionContext> {
                 vars.put(SUGGESTION_TEMPLATE_VAR_NAME, spare.toString());
                 final ExecutableScript executable = scriptService.executable(collateScript, vars);
                 final BytesReference querySource = (BytesReference) executable.run();
-                IndexService indexService = indicesService.indexService(suggestion.getIndex());
+                IndexService indexService = indicesService.indexService(suggestion.getShard().getIndex());
                 final ParsedQuery parsedQuery = indexService.newQueryShardContext().parse(querySource);
                 collateMatch = Lucene.exists(searcher, parsedQuery.query());
             }

View File

@@ -33,6 +33,7 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.lease.Releasable;
 import org.elasticsearch.common.lease.Releasables;
 import org.elasticsearch.common.math.MathUtils;
 import org.elasticsearch.common.metrics.CounterMetric;
@@ -943,8 +944,8 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
         }
         globalLock.readLock().lock();
         try {
-            connectionLock.acquire(node.id());
-            try {
+            try (Releasable ignored = connectionLock.acquire(node.id())) {
                 if (!lifecycle.started()) {
                     throw new IllegalStateException("can't add nodes to a stopped transport");
                 }
@@ -979,8 +980,6 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
             } catch (Exception e) {
                 throw new ConnectTransportException(node, "general node connection failure", e);
             }
-            } finally {
-                connectionLock.release(node.id());
             }
         } finally {
             globalLock.readLock().unlock();
@@ -1103,8 +1102,8 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
     @Override
     public void disconnectFromNode(DiscoveryNode node) {
-        connectionLock.acquire(node.id());
-        try {
+        try (Releasable ignored = connectionLock.acquire(node.id())) {
             NodeChannels nodeChannels = connectedNodes.remove(node);
             if (nodeChannels != null) {
                 try {
@@ -1115,8 +1114,6 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
                     transportServiceAdapter.raiseNodeDisconnected(node);
                 }
             }
-        } finally {
-            connectionLock.release(node.id());
         }
     }
@@ -1128,8 +1125,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
         // check outside of the lock
         NodeChannels nodeChannels = connectedNodes.get(node);
         if (nodeChannels != null && nodeChannels.hasChannel(channel)) {
-            connectionLock.acquire(node.id());
-            try {
+            try (Releasable ignored = connectionLock.acquire(node.id())) {
                 nodeChannels = connectedNodes.get(node);
                 // check again within the connection lock, if its still applicable to remove it
                 if (nodeChannels != null && nodeChannels.hasChannel(channel)) {
@@ -1143,8 +1139,6 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
                 }
                 return true;
             }
-            } finally {
-                connectionLock.release(node.id());
             }
         }
         return false;
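
The locking change works because acquire() now hands back a token whose close() releases the per-key lock (Releasable extends AutoCloseable), so try-with-resources guarantees the release that the old acquire()/finally-release() pair spelled out by hand, even on early return or throw. A self-contained sketch of the mechanism; KeyedLockSketch is an illustrative stand-in, not the real KeyedLock (which also manages the lifecycle of its per-key entries):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.ReentrantLock;

final class KeyedLockSketch {
    private final ConcurrentMap<String, ReentrantLock> locks = new ConcurrentHashMap<>();

    // Returns a token whose close() releases the per-key lock.
    AutoCloseable acquire(String key) {
        ReentrantLock lock = locks.computeIfAbsent(key, k -> new ReentrantLock());
        lock.lock();
        return lock::unlock;
    }
}

class ConnectSketch {
    private static final KeyedLockSketch connectionLock = new KeyedLockSketch();

    static void disconnectFromNode(String nodeId) throws Exception {
        // The release happens automatically when the try block exits, by any path.
        try (AutoCloseable ignored = connectionLock.acquire(nodeId)) {
            // ... remove and close the node's channels ...
        }
    }
}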

View File

@@ -194,7 +194,7 @@ public abstract class TaskManagerTestCase extends ESTestCase {
             }
         };
         transportService.start();
-        clusterService = new TestClusterService(threadPool, transportService);
+        clusterService = new TestClusterService(threadPool);
         clusterService.add(transportService.getTaskManager());
         discoveryNode = new DiscoveryNode(name, transportService.boundAddress().publishAddress(), Version.CURRENT);
         IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(settings);
@@ -238,7 +238,7 @@ public abstract class TaskManagerTestCase extends ESTestCase {
         RecordingTaskManagerListener[] listeners = new RecordingTaskManagerListener[nodes.length];
         for (int i = 0; i < nodes.length; i++) {
             listeners[i] = new RecordingTaskManagerListener(nodes[i].discoveryNode, actionMasks);
-            ((MockTaskManager) (nodes[i].clusterService.getTaskManager())).addListener(listeners[i]);
+            ((MockTaskManager) (nodes[i].transportService.getTaskManager())).addListener(listeners[i]);
         }
         return listeners;
     }

View File

@@ -43,6 +43,7 @@ import org.elasticsearch.test.tasks.MockTaskManager;
 import org.elasticsearch.test.tasks.MockTaskManagerListener;
 import org.elasticsearch.test.transport.MockTransportService;
 import org.elasticsearch.transport.ReceiveTimeoutTransportException;
+import org.elasticsearch.transport.TransportService;
 import java.io.IOException;
 import java.util.ArrayList;
@@ -263,8 +264,8 @@ public class TasksIT extends ESIntegTestCase {
         ReentrantLock taskFinishLock = new ReentrantLock();
         taskFinishLock.lock();
         CountDownLatch taskRegistered = new CountDownLatch(1);
-        for (ClusterService clusterService : internalCluster().getInstances(ClusterService.class)) {
-            ((MockTaskManager)clusterService.getTaskManager()).addListener(new MockTaskManagerListener() {
+        for (TransportService transportService : internalCluster().getInstances(TransportService.class)) {
+            ((MockTaskManager) transportService.getTaskManager()).addListener(new MockTaskManagerListener() {
                 @Override
                 public void onTaskRegistered(Task task) {
                     if (task.getAction().startsWith(IndexAction.NAME)) {
@@ -408,7 +409,7 @@ public class TasksIT extends ESIntegTestCase {
     @Override
     public void tearDown() throws Exception {
         for (Map.Entry<Tuple<String, String>, RecordingTaskManagerListener> entry : listeners.entrySet()) {
-            ((MockTaskManager)internalCluster().getInstance(ClusterService.class, entry.getKey().v1()).getTaskManager()).removeListener(entry.getValue());
+            ((MockTaskManager) internalCluster().getInstance(TransportService.class, entry.getKey().v1()).getTaskManager()).removeListener(entry.getValue());
         }
         listeners.clear();
         super.tearDown();
@@ -418,10 +419,10 @@ public class TasksIT extends ESIntegTestCase {
      * Registers recording task event listeners with the given action mask on all nodes
      */
     private void registerTaskManageListeners(String actionMasks) {
-        for (ClusterService clusterService : internalCluster().getInstances(ClusterService.class)) {
-            DiscoveryNode node = clusterService.localNode();
+        for (String nodeName : internalCluster().getNodeNames()) {
+            DiscoveryNode node = internalCluster().getInstance(ClusterService.class, nodeName).localNode();
             RecordingTaskManagerListener listener = new RecordingTaskManagerListener(node, Strings.splitStringToArray(actionMasks, ','));
-            ((MockTaskManager)clusterService.getTaskManager()).addListener(listener);
+            ((MockTaskManager) internalCluster().getInstance(TransportService.class, nodeName).getTaskManager()).addListener(listener);
             RecordingTaskManagerListener oldListener = listeners.put(new Tuple<>(node.name(), actionMasks), listener);
             assertNull(oldListener);
         }

View File

@@ -32,6 +32,7 @@ import org.elasticsearch.cluster.routing.ShardRoutingState;
 import org.elasticsearch.common.collect.ImmutableOpenIntMap;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.indices.IndicesService;
@@ -157,6 +158,7 @@ public class IndicesShardStoreRequestIT extends ESIntegTestCase {
                 .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "5")
                 .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false)
         ));
+
         indexRandomData(index);
         ensureGreen(index);
@@ -165,9 +167,10 @@ public class IndicesShardStoreRequestIT extends ESIntegTestCase {
         logger.info("--> corrupt random shard copies");
         Map<Integer, Set<String>> corruptedShardIDMap = new HashMap<>();
+        Index idx = resolveIndex(index);
         for (String node : internalCluster().nodesInclude(index)) {
             IndicesService indexServices = internalCluster().getInstance(IndicesService.class, node);
-            IndexService indexShards = indexServices.indexServiceSafe(index);
+            IndexService indexShards = indexServices.indexServiceSafe(idx);
             for (Integer shardId : indexShards.shardIds()) {
                 IndexShard shard = indexShards.getShard(shardId);
                 if (randomBoolean()) {


@@ -113,7 +113,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase {
        @Override
        protected ShardIterator shards(ClusterState clusterState, Request request) {
-           return clusterState.routingTable().index(request.concreteIndex()).shard(request.shardId).primaryShardIt();
+           return clusterState.routingTable().index(request.concreteIndex()).shard(request.shardId.getId()).primaryShardIt();
        }
    }
@@ -178,7 +178,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase {
    public void testBasicRequestWorks() throws InterruptedException, ExecutionException, TimeoutException {
        Request request = new Request().index("test");
-       request.shardId = 0;
+       request.shardId = new ShardId("test", "_na_", 0);
        PlainActionFuture<Response> listener = new PlainActionFuture<>();
        clusterService.setState(ClusterStateCreationUtils.state("test", randomBoolean(), ShardRoutingState.STARTED));
        action.new AsyncSingleAction(request, listener).start();
@@ -189,7 +189,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase {
    public void testFailureWithoutRetry() throws Exception {
        Request request = new Request().index("test");
-       request.shardId = 0;
+       request.shardId = new ShardId("test", "_na_", 0);
        PlainActionFuture<Response> listener = new PlainActionFuture<>();
        clusterService.setState(ClusterStateCreationUtils.state("test", randomBoolean(), ShardRoutingState.STARTED));
@@ -215,7 +215,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase {
    public void testSuccessAfterRetryWithClusterStateUpdate() throws Exception {
        Request request = new Request().index("test");
-       request.shardId = 0;
+       request.shardId = new ShardId("test", "_na_", 0);
        PlainActionFuture<Response> listener = new PlainActionFuture<>();
        boolean local = randomBoolean();
        clusterService.setState(ClusterStateCreationUtils.state("test", local, ShardRoutingState.INITIALIZING));
@@ -231,7 +231,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase {
    public void testSuccessAfterRetryWithExceptionFromTransport() throws Exception {
        Request request = new Request().index("test");
-       request.shardId = 0;
+       request.shardId = new ShardId("test", "_na_", 0);
        PlainActionFuture<Response> listener = new PlainActionFuture<>();
        boolean local = randomBoolean();
        clusterService.setState(ClusterStateCreationUtils.state("test", local, ShardRoutingState.STARTED));
@@ -250,7 +250,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase {
    public void testRetryOfAnAlreadyTimedOutRequest() throws Exception {
        Request request = new Request().index("test").timeout(new TimeValue(0, TimeUnit.MILLISECONDS));
-       request.shardId = 0;
+       request.shardId = new ShardId("test", "_na_", 0);
        PlainActionFuture<Response> listener = new PlainActionFuture<>();
        clusterService.setState(ClusterStateCreationUtils.state("test", randomBoolean(), ShardRoutingState.STARTED));
        action.new AsyncSingleAction(request, listener).start();
@@ -299,7 +299,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase {
        }
    };
    Request request = new Request().index("test");
-   request.shardId = 0;
+   request.shardId = new ShardId("test", "_na_", 0);
    PlainActionFuture<Response> listener = new PlainActionFuture<>();
    clusterService.setState(ClusterStateCreationUtils.state("test", randomBoolean(), ShardRoutingState.STARTED));
    action.new AsyncSingleAction(request, listener).start();


@@ -30,6 +30,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.DummyTransportAddress;
+import org.elasticsearch.index.Index;
import org.elasticsearch.test.ESTestCase;
import java.util.ArrayList;
@@ -37,6 +38,7 @@ import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
+import java.util.stream.Collectors;
import static org.hamcrest.Matchers.equalTo;
@@ -220,7 +222,7 @@ public class ClusterChangedEventTests extends ESTestCase {
    final ClusterState newState = nextState(previousState, changeClusterUUID, addedIndices, delIndices, 0);
    final ClusterChangedEvent event = new ClusterChangedEvent("_na_", newState, previousState);
    final List<String> addsFromEvent = event.indicesCreated();
-   final List<String> delsFromEvent = event.indicesDeleted();
+   final List<String> delsFromEvent = event.indicesDeleted().stream().map((s) -> s.getName()).collect(Collectors.toList());
    Collections.sort(addsFromEvent);
    Collections.sort(delsFromEvent);
    assertThat(addsFromEvent, equalTo(addedIndices));


@@ -18,16 +18,12 @@
 */
package org.elasticsearch.cluster;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse;
import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.cluster.service.InternalClusterService;
import org.elasticsearch.cluster.service.PendingClusterTask;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Priority;
-import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.inject.Inject;
@@ -35,38 +31,24 @@ import org.elasticsearch.common.inject.Singleton;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.zen.ZenDiscovery;
-import org.elasticsearch.node.Node;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.elasticsearch.test.ESIntegTestCase.Scope;
-import org.elasticsearch.test.InternalTestCluster;
-import org.elasticsearch.test.MockLogAppender;
import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.threadpool.ThreadPool;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
-import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
-import java.util.Map;
import java.util.Set;
-import java.util.concurrent.BrokenBarrierException;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.CyclicBarrier;
-import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
-import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
@@ -85,74 +67,6 @@ public class ClusterServiceIT extends ESIntegTestCase {
        return pluginList(TestPlugin.class);
    }
public void testTimeoutUpdateTask() throws Exception {
Settings settings = settingsBuilder()
.put("discovery.type", "local")
.build();
internalCluster().startNode(settings);
ClusterService clusterService1 = internalCluster().getInstance(ClusterService.class);
final CountDownLatch block = new CountDownLatch(1);
clusterService1.submitStateUpdateTask("test1", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
try {
block.await();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
return currentState;
}
@Override
public void onFailure(String source, Throwable t) {
throw new RuntimeException(t);
}
});
final CountDownLatch timedOut = new CountDownLatch(1);
final AtomicBoolean executeCalled = new AtomicBoolean();
clusterService1.submitStateUpdateTask("test2", new ClusterStateUpdateTask() {
@Override
public TimeValue timeout() {
return TimeValue.timeValueMillis(2);
}
@Override
public void onFailure(String source, Throwable t) {
timedOut.countDown();
}
@Override
public ClusterState execute(ClusterState currentState) {
executeCalled.set(true);
return currentState;
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
}
});
timedOut.await();
block.countDown();
final CountDownLatch allProcessed = new CountDownLatch(1);
clusterService1.submitStateUpdateTask("test3", new ClusterStateUpdateTask() {
@Override
public void onFailure(String source, Throwable t) {
throw new RuntimeException(t);
}
@Override
public ClusterState execute(ClusterState currentState) {
allProcessed.countDown();
return currentState;
}
});
allProcessed.await(); // execute another task to double-check that execute() on the timed-out update task is not called
assertThat(executeCalled.get(), equalTo(false));
}
    public void testAckedUpdateTask() throws Exception {
        Settings settings = settingsBuilder()
                .put("discovery.type", "local")
@@ -299,63 +213,6 @@ public class ClusterServiceIT extends ESIntegTestCase {
        assertThat(processedLatch.await(1, TimeUnit.SECONDS), equalTo(true));
    }
public void testMasterAwareExecution() throws Exception {
Settings settings = settingsBuilder()
.put("discovery.type", "local")
.build();
InternalTestCluster.Async<String> master = internalCluster().startNodeAsync(settings);
InternalTestCluster.Async<String> nonMaster = internalCluster().startNodeAsync(settingsBuilder().put(settings).put(Node.NODE_MASTER_SETTING.getKey(), false).build());
master.get();
ensureGreen(); // make sure we have a cluster
ClusterService clusterService = internalCluster().getInstance(ClusterService.class, nonMaster.get());
final boolean[] taskFailed = {false};
final CountDownLatch latch1 = new CountDownLatch(1);
clusterService.submitStateUpdateTask("test", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
latch1.countDown();
return currentState;
}
@Override
public void onFailure(String source, Throwable t) {
taskFailed[0] = true;
latch1.countDown();
}
});
latch1.await();
assertTrue("cluster state update task was executed on a non-master", taskFailed[0]);
taskFailed[0] = true;
final CountDownLatch latch2 = new CountDownLatch(1);
clusterService.submitStateUpdateTask("test", new ClusterStateUpdateTask() {
@Override
public boolean runOnlyOnMaster() {
return false;
}
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
taskFailed[0] = false;
latch2.countDown();
return currentState;
}
@Override
public void onFailure(String source, Throwable t) {
taskFailed[0] = true;
latch2.countDown();
}
});
latch2.await();
assertFalse("non-master cluster state update task was not executed", taskFailed[0]);
}
    public void testAckedUpdateTaskNoAckExpected() throws Exception {
        Settings settings = settingsBuilder()
                .put("discovery.type", "local")
@@ -715,571 +572,6 @@ public class ClusterServiceIT extends ESIntegTestCase {
        }
    }
/**
* Note, this test can only work as long as we have a single-threaded executor executing the state update tasks!
*/
public void testPrioritizedTasks() throws Exception {
Settings settings = settingsBuilder()
.put("discovery.type", "local")
.build();
internalCluster().startNode(settings);
ClusterService clusterService = internalCluster().getInstance(ClusterService.class);
BlockingTask block = new BlockingTask(Priority.IMMEDIATE);
clusterService.submitStateUpdateTask("test", block);
int taskCount = randomIntBetween(5, 20);
Priority[] priorities = Priority.values();
// will hold all the tasks in the order in which they were executed
List<PrioritizedTask> tasks = new ArrayList<>(taskCount);
CountDownLatch latch = new CountDownLatch(taskCount);
for (int i = 0; i < taskCount; i++) {
Priority priority = priorities[randomIntBetween(0, priorities.length - 1)];
clusterService.submitStateUpdateTask("test", new PrioritizedTask(priority, latch, tasks));
}
block.release();
latch.await();
Priority prevPriority = null;
for (PrioritizedTask task : tasks) {
if (prevPriority == null) {
prevPriority = task.priority();
} else {
assertThat(task.priority().sameOrAfter(prevPriority), is(true));
}
}
}
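
The note above is load-bearing: priority order is only observable because a single thread drains the update queue. A standalone plain-JDK sketch of the same invariant (illustrative only, not the cluster service's code):

import java.util.concurrent.PriorityBlockingQueue;

public class PriorityOrderSketch {
    public static void main(String[] args) throws InterruptedException {
        PriorityBlockingQueue<Integer> queue = new PriorityBlockingQueue<>();
        for (int p : new int[] {5, 1, 3, 2, 4}) {
            queue.put(p); // lower value = more urgent, akin to Priority.IMMEDIATE going first
        }
        // a single consumer drains in priority order; two consumers would interleave
        Integer prev = null;
        while (!queue.isEmpty()) {
            int p = queue.take();
            if (prev != null && p < prev) throw new AssertionError("executed out of order");
            prev = p;
        }
        System.out.println("single-threaded drain preserved priority order");
    }
}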
/*
* test that a listener throwing an exception while handling a
* notification does not prevent publication notification to the
* executor
*/
public void testClusterStateTaskListenerThrowingExceptionIsOkay() throws InterruptedException {
Settings settings = settingsBuilder()
.put("discovery.type", "local")
.build();
internalCluster().startNode(settings);
ClusterService clusterService = internalCluster().getInstance(ClusterService.class);
final CountDownLatch latch = new CountDownLatch(1);
AtomicBoolean published = new AtomicBoolean();
clusterService.submitStateUpdateTask(
"testClusterStateTaskListenerThrowingExceptionIsOkay",
new Object(),
ClusterStateTaskConfig.build(Priority.NORMAL),
new ClusterStateTaskExecutor<Object>() {
@Override
public boolean runOnlyOnMaster() {
return false;
}
@Override
public BatchResult<Object> execute(ClusterState currentState, List<Object> tasks) throws Exception {
ClusterState newClusterState = ClusterState.builder(currentState).build();
return BatchResult.builder().successes(tasks).build(newClusterState);
}
@Override
public void clusterStatePublished(ClusterState newClusterState) {
published.set(true);
latch.countDown();
}
},
new ClusterStateTaskListener() {
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
throw new IllegalStateException(source);
}
@Override
public void onFailure(String source, Throwable t) {
}
}
);
latch.await();
assertTrue(published.get());
}
// test that for a single thread, tasks are executed in the order
// that they are submitted
public void testClusterStateUpdateTasksAreExecutedInOrder() throws BrokenBarrierException, InterruptedException {
Settings settings = settingsBuilder()
.put("discovery.type", "local")
.build();
internalCluster().startNode(settings);
ClusterService clusterService = internalCluster().getInstance(ClusterService.class);
class TaskExecutor implements ClusterStateTaskExecutor<Integer> {
List<Integer> tasks = new ArrayList<>();
@Override
public BatchResult<Integer> execute(ClusterState currentState, List<Integer> tasks) throws Exception {
this.tasks.addAll(tasks);
return BatchResult.<Integer>builder().successes(tasks).build(ClusterState.builder(currentState).build());
}
@Override
public boolean runOnlyOnMaster() {
return false;
}
}
int numberOfThreads = randomIntBetween(2, 8);
TaskExecutor[] executors = new TaskExecutor[numberOfThreads];
for (int i = 0; i < numberOfThreads; i++) {
executors[i] = new TaskExecutor();
}
int tasksSubmittedPerThread = randomIntBetween(2, 1024);
CopyOnWriteArrayList<Tuple<String, Throwable>> failures = new CopyOnWriteArrayList<>();
CountDownLatch updateLatch = new CountDownLatch(numberOfThreads * tasksSubmittedPerThread);
ClusterStateTaskListener listener = new ClusterStateTaskListener() {
@Override
public void onFailure(String source, Throwable t) {
logger.error("unexpected failure: [{}]", t, source);
failures.add(new Tuple<>(source, t));
updateLatch.countDown();
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
updateLatch.countDown();
}
};
CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);
for (int i = 0; i < numberOfThreads; i++) {
final int index = i;
Thread thread = new Thread(() -> {
try {
barrier.await();
for (int j = 0; j < tasksSubmittedPerThread; j++) {
clusterService.submitStateUpdateTask("[" + index + "][" + j + "]", j, ClusterStateTaskConfig.build(randomFrom(Priority.values())), executors[index], listener);
}
barrier.await();
} catch (InterruptedException | BrokenBarrierException e) {
throw new AssertionError(e);
}
});
thread.start();
}
// wait for all threads to be ready
barrier.await();
// wait for all threads to finish
barrier.await();
updateLatch.await();
assertThat(failures, empty());
for (int i = 0; i < numberOfThreads; i++) {
assertEquals(tasksSubmittedPerThread, executors[i].tasks.size());
for (int j = 0; j < tasksSubmittedPerThread; j++) {
assertNotNull(executors[i].tasks.get(j));
assertEquals("cluster state update task executed out of order", j, (int)executors[i].tasks.get(j));
}
}
}
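
The ordering assertion works because each submitter enqueues its tasks sequentially onto a single-threaded executor whose FIFO queue preserves per-submitter order; the barriers only bracket the submission phase. A standalone plain-JDK sketch of that pattern (names are illustrative):

import java.util.List;
import java.util.concurrent.*;

public class InOrderSketch {
    public static void main(String[] args) throws Exception {
        int threads = 4, perThread = 100;
        ExecutorService executor = Executors.newSingleThreadExecutor();
        List<CopyOnWriteArrayList<Integer>> seen = new java.util.ArrayList<>();
        for (int i = 0; i < threads; i++) seen.add(new CopyOnWriteArrayList<>());
        CyclicBarrier barrier = new CyclicBarrier(1 + threads);
        for (int i = 0; i < threads; i++) {
            final int index = i;
            new Thread(() -> {
                try {
                    barrier.await();
                    for (int j = 0; j < perThread; j++) {
                        final int task = j;
                        executor.execute(() -> seen.get(index).add(task));
                    }
                    barrier.await();
                } catch (Exception e) { throw new AssertionError(e); }
            }).start();
        }
        barrier.await(); // release the submitters
        barrier.await(); // wait for submission to finish
        executor.shutdown();
        executor.awaitTermination(10, TimeUnit.SECONDS);
        for (List<Integer> perSubmitter : seen) {
            for (int j = 0; j < perThread; j++) {
                if (perSubmitter.get(j) != j) throw new AssertionError("out of order");
            }
        }
    }
}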
public void testClusterStateBatchedUpdates() throws BrokenBarrierException, InterruptedException {
Settings settings = settingsBuilder()
.put("discovery.type", "local")
.build();
internalCluster().startNode(settings);
ClusterService clusterService = internalCluster().getInstance(ClusterService.class);
AtomicInteger counter = new AtomicInteger();
class Task {
private AtomicBoolean state = new AtomicBoolean();
public void execute() {
if (!state.compareAndSet(false, true)) {
throw new IllegalStateException();
} else {
counter.incrementAndGet();
}
}
}
int numberOfThreads = randomIntBetween(2, 8);
int tasksSubmittedPerThread = randomIntBetween(1, 1024);
int numberOfExecutors = Math.max(1, numberOfThreads / 4);
final Semaphore semaphore = new Semaphore(numberOfExecutors);
class TaskExecutor implements ClusterStateTaskExecutor<Task> {
private AtomicInteger counter = new AtomicInteger();
private AtomicInteger batches = new AtomicInteger();
private AtomicInteger published = new AtomicInteger();
@Override
public BatchResult<Task> execute(ClusterState currentState, List<Task> tasks) throws Exception {
tasks.forEach(task -> task.execute());
counter.addAndGet(tasks.size());
ClusterState maybeUpdatedClusterState = currentState;
if (randomBoolean()) {
maybeUpdatedClusterState = ClusterState.builder(currentState).build();
batches.incrementAndGet();
semaphore.acquire();
}
return BatchResult.<Task>builder().successes(tasks).build(maybeUpdatedClusterState);
}
@Override
public boolean runOnlyOnMaster() {
return false;
}
@Override
public void clusterStatePublished(ClusterState newClusterState) {
published.incrementAndGet();
semaphore.release();
}
}
ConcurrentMap<String, AtomicInteger> counters = new ConcurrentHashMap<>();
CountDownLatch updateLatch = new CountDownLatch(numberOfThreads * tasksSubmittedPerThread);
ClusterStateTaskListener listener = new ClusterStateTaskListener() {
@Override
public void onFailure(String source, Throwable t) {
assert false;
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
counters.computeIfAbsent(source, key -> new AtomicInteger()).incrementAndGet();
updateLatch.countDown();
}
};
List<TaskExecutor> executors = new ArrayList<>();
for (int i = 0; i < numberOfExecutors; i++) {
executors.add(new TaskExecutor());
}
// randomly assign tasks to executors
List<TaskExecutor> assignments = new ArrayList<>();
for (int i = 0; i < numberOfThreads; i++) {
for (int j = 0; j < tasksSubmittedPerThread; j++) {
assignments.add(randomFrom(executors));
}
}
Map<TaskExecutor, Integer> counts = new HashMap<>();
for (TaskExecutor executor : assignments) {
counts.merge(executor, 1, (previous, one) -> previous + one);
}
CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);
for (int i = 0; i < numberOfThreads; i++) {
final int index = i;
Thread thread = new Thread(() -> {
try {
barrier.await();
for (int j = 0; j < tasksSubmittedPerThread; j++) {
ClusterStateTaskExecutor<Task> executor = assignments.get(index * tasksSubmittedPerThread + j);
clusterService.submitStateUpdateTask(
Thread.currentThread().getName(),
new Task(),
ClusterStateTaskConfig.build(randomFrom(Priority.values())),
executor,
listener);
}
barrier.await();
} catch (BrokenBarrierException | InterruptedException e) {
throw new AssertionError(e);
}
});
thread.start();
}
// wait for all threads to be ready
barrier.await();
// wait for all threads to finish
barrier.await();
// wait until all the cluster state updates have been processed
updateLatch.await();
// and until all of the publication callbacks have completed
semaphore.acquire(numberOfExecutors);
// assert the number of executed tasks is correct
assertEquals(numberOfThreads * tasksSubmittedPerThread, counter.get());
// assert each executor executed the correct number of tasks
for (TaskExecutor executor : executors) {
if (counts.containsKey(executor)) {
assertEquals((int) counts.get(executor), executor.counter.get());
assertEquals(executor.batches.get(), executor.published.get());
}
}
// assert the correct number of clusterStateProcessed events were triggered
for (Map.Entry<String, AtomicInteger> entry : counters.entrySet()) {
assertEquals(entry.getValue().get(), tasksSubmittedPerThread);
}
}
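
In miniature, the batching invariant checked above: tasks that share an executor may be drained together into one batch, but each task runs exactly once and the per-executor counts add up. A plain-JDK sketch, illustrative only:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicInteger;

public class BatchDrainSketch {
    public static void main(String[] args) {
        ConcurrentLinkedQueue<Runnable> pending = new ConcurrentLinkedQueue<>();
        AtomicInteger executed = new AtomicInteger();
        int submitted = 100;
        for (int i = 0; i < submitted; i++) {
            pending.add(executed::incrementAndGet);
        }
        int batches = 0;
        while (!pending.isEmpty()) {
            // drain the current backlog into one batch, as the update thread does
            List<Runnable> batch = new ArrayList<>();
            Runnable task;
            while ((task = pending.poll()) != null) {
                batch.add(task);
            }
            batch.forEach(Runnable::run);
            batches++;
        }
        System.out.println(executed.get() + " tasks executed in " + batches + " batch(es)");
    }
}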
@TestLogging("cluster:TRACE") // To ensure that we log cluster state events on TRACE level
public void testClusterStateUpdateLogging() throws Exception {
Settings settings = settingsBuilder()
.put("discovery.type", "local")
.build();
internalCluster().startNode(settings);
ClusterService clusterService1 = internalCluster().getInstance(ClusterService.class);
MockLogAppender mockAppender = new MockLogAppender();
mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test1", "cluster.service", Level.DEBUG, "*processing [test1]: took * no change in cluster_state"));
mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test2", "cluster.service", Level.TRACE, "*failed to execute cluster state update in *"));
mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test3", "cluster.service", Level.DEBUG, "*processing [test3]: took * done applying updated cluster_state (version: *, uuid: *)"));
Logger rootLogger = Logger.getRootLogger();
rootLogger.addAppender(mockAppender);
try {
final CountDownLatch latch = new CountDownLatch(4);
clusterService1.submitStateUpdateTask("test1", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
return currentState;
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
latch.countDown();
}
@Override
public void onFailure(String source, Throwable t) {
fail();
}
});
clusterService1.submitStateUpdateTask("test2", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task");
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
fail();
}
@Override
public void onFailure(String source, Throwable t) {
latch.countDown();
}
});
clusterService1.submitStateUpdateTask("test3", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
return ClusterState.builder(currentState).incrementVersion().build();
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
latch.countDown();
}
@Override
public void onFailure(String source, Throwable t) {
fail();
}
});
// Additional update task to make sure all previous logging made it to the logger
// We don't check logging for this one since there is no guarantee that it will occur before our check
clusterService1.submitStateUpdateTask("test4", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
return currentState;
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
latch.countDown();
}
@Override
public void onFailure(String source, Throwable t) {
fail();
}
});
assertThat(latch.await(1, TimeUnit.SECONDS), equalTo(true));
} finally {
rootLogger.removeAppender(mockAppender);
}
mockAppender.assertAllExpectationsMatched();
}
@TestLogging("cluster:WARN") // To ensure that we log cluster state events on WARN level
public void testLongClusterStateUpdateLogging() throws Exception {
Settings settings = settingsBuilder()
.put("discovery.type", "local")
.put(InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.getKey(), "10s")
.build();
internalCluster().startNode(settings);
ClusterService clusterService1 = internalCluster().getInstance(ClusterService.class);
MockLogAppender mockAppender = new MockLogAppender();
mockAppender.addExpectation(new MockLogAppender.UnseenEventExpectation("test1 shouldn't see because setting is too low", "cluster.service", Level.WARN, "*cluster state update task [test1] took * above the warn threshold of *"));
mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test2", "cluster.service", Level.WARN, "*cluster state update task [test2] took * above the warn threshold of 10ms"));
mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test3", "cluster.service", Level.WARN, "*cluster state update task [test3] took * above the warn threshold of 10ms"));
mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test4", "cluster.service", Level.WARN, "*cluster state update task [test4] took * above the warn threshold of 10ms"));
Logger rootLogger = Logger.getRootLogger();
rootLogger.addAppender(mockAppender);
try {
final CountDownLatch latch = new CountDownLatch(5);
final CountDownLatch processedFirstTask = new CountDownLatch(1);
clusterService1.submitStateUpdateTask("test1", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
Thread.sleep(100);
return currentState;
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
latch.countDown();
processedFirstTask.countDown();
}
@Override
public void onFailure(String source, Throwable t) {
fail();
}
});
processedFirstTask.await(1, TimeUnit.SECONDS);
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder()
.put(InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.getKey(), "10ms")));
clusterService1.submitStateUpdateTask("test2", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
Thread.sleep(100);
throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task");
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
fail();
}
@Override
public void onFailure(String source, Throwable t) {
latch.countDown();
}
});
clusterService1.submitStateUpdateTask("test3", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
Thread.sleep(100);
return ClusterState.builder(currentState).incrementVersion().build();
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
latch.countDown();
}
@Override
public void onFailure(String source, Throwable t) {
fail();
}
});
clusterService1.submitStateUpdateTask("test4", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
Thread.sleep(100);
return currentState;
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
latch.countDown();
}
@Override
public void onFailure(String source, Throwable t) {
fail();
}
});
// Additional update task to make sure all previous logging made it to the logger
// We don't check logging for this one since there is no guarantee that it will occur before our check
clusterService1.submitStateUpdateTask("test5", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
return currentState;
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
latch.countDown();
}
@Override
public void onFailure(String source, Throwable t) {
fail();
}
});
assertThat(latch.await(5, TimeUnit.SECONDS), equalTo(true));
} finally {
rootLogger.removeAppender(mockAppender);
}
mockAppender.assertAllExpectationsMatched();
}
private static class BlockingTask extends ClusterStateUpdateTask {
private final CountDownLatch latch = new CountDownLatch(1);
public BlockingTask(Priority priority) {
super(priority);
}
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
latch.await();
return currentState;
}
@Override
public void onFailure(String source, Throwable t) {
}
public void release() {
latch.countDown();
}
}
private static class PrioritizedTask extends ClusterStateUpdateTask {
private final CountDownLatch latch;
private final List<PrioritizedTask> tasks;
private PrioritizedTask(Priority priority, CountDownLatch latch, List<PrioritizedTask> tasks) {
super(priority);
this.latch = latch;
this.tasks = tasks;
}
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
tasks.add(this);
latch.countDown();
return currentState;
}
@Override
public void onFailure(String source, Throwable t) {
latch.countDown();
}
}
    public static class TestPlugin extends Plugin {
        @Override


@@ -143,11 +143,11 @@ public class DiskUsageTests extends ESTestCase {
    };
    NodeStats[] nodeStats = new NodeStats[] {
        new NodeStats(new DiscoveryNode("node_1", DummyTransportAddress.INSTANCE, Version.CURRENT), 0,
-           null,null,null,null,null,new FsInfo(0, node1FSInfo), null,null,null,null,null),
+           null,null,null,null,null,new FsInfo(0, node1FSInfo), null,null,null,null,null, null),
        new NodeStats(new DiscoveryNode("node_2", DummyTransportAddress.INSTANCE, Version.CURRENT), 0,
-           null,null,null,null,null, new FsInfo(0, node2FSInfo), null,null,null,null,null),
+           null,null,null,null,null, new FsInfo(0, node2FSInfo), null,null,null,null,null, null),
        new NodeStats(new DiscoveryNode("node_3", DummyTransportAddress.INSTANCE, Version.CURRENT), 0,
-           null,null,null,null,null, new FsInfo(0, node3FSInfo), null,null,null,null,null)
+           null,null,null,null,null, new FsInfo(0, node3FSInfo), null,null,null,null,null, null)
    };
    InternalClusterInfoService.fillDiskUsagePerNode(logger, nodeStats, newLeastAvaiableUsages, newMostAvaiableUsages);
    DiskUsage leastNode_1 = newLeastAvaiableUsages.get("node_1");
@@ -184,11 +184,11 @@ public class DiskUsageTests extends ESTestCase {
    };
    NodeStats[] nodeStats = new NodeStats[] {
        new NodeStats(new DiscoveryNode("node_1", DummyTransportAddress.INSTANCE, Version.CURRENT), 0,
-           null,null,null,null,null,new FsInfo(0, node1FSInfo), null,null,null,null,null),
+           null,null,null,null,null,new FsInfo(0, node1FSInfo), null,null,null,null,null, null),
        new NodeStats(new DiscoveryNode("node_2", DummyTransportAddress.INSTANCE, Version.CURRENT), 0,
-           null,null,null,null,null, new FsInfo(0, node2FSInfo), null,null,null,null,null),
+           null,null,null,null,null, new FsInfo(0, node2FSInfo), null,null,null,null,null, null),
        new NodeStats(new DiscoveryNode("node_3", DummyTransportAddress.INSTANCE, Version.CURRENT), 0,
-           null,null,null,null,null, new FsInfo(0, node3FSInfo), null,null,null,null,null)
+           null,null,null,null,null, new FsInfo(0, node3FSInfo), null,null,null,null,null, null)
    };
    InternalClusterInfoService.fillDiskUsagePerNode(logger, nodeStats, newLeastAvailableUsages, newMostAvailableUsages);
    DiskUsage leastNode_1 = newLeastAvailableUsages.get("node_1");


@@ -0,0 +1,275 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.component.Lifecycle;
import org.elasticsearch.common.component.LifecycleListener;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.DummyTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.TransportServiceAdapter;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import static org.hamcrest.Matchers.equalTo;
public class NodeConnectionsServiceTests extends ESTestCase {
private static ThreadPool THREAD_POOL;
private MockTransport transport;
private TransportService transportService;
private List<DiscoveryNode> generateNodes() {
List<DiscoveryNode> nodes = new ArrayList<>();
for (int i = randomIntBetween(20, 50); i > 0; i--) {
final HashMap<String, String> attributes = new HashMap<>();
if (rarely()) {
attributes.put("client", "true");
} else {
attributes.put("master", "" + randomBoolean());
attributes.put("data", "" + randomBoolean());
attributes.put("ingest", "" + randomBoolean());
}
nodes.add(new DiscoveryNode("node_" + i, "" + i, DummyTransportAddress.INSTANCE, attributes, Version.CURRENT));
}
return nodes;
}
private ClusterState clusterStateFromNodes(List<DiscoveryNode> nodes) {
final DiscoveryNodes.Builder builder = DiscoveryNodes.builder();
for (DiscoveryNode node : nodes) {
builder.put(node);
}
return ClusterState.builder(new ClusterName("test")).nodes(builder).build();
}
public void testConnectAndDisconnect() {
List<DiscoveryNode> nodes = generateNodes();
NodeConnectionsService service = new NodeConnectionsService(Settings.EMPTY, THREAD_POOL, transportService);
ClusterState current = clusterStateFromNodes(Collections.emptyList());
ClusterChangedEvent event = new ClusterChangedEvent("test", clusterStateFromNodes(randomSubsetOf(nodes)), current);
service.connectToAddedNodes(event);
assertConnected(event.nodesDelta().addedNodes());
service.disconnectFromRemovedNodes(event);
assertConnectedExactlyToNodes(event.state());
current = event.state();
event = new ClusterChangedEvent("test", clusterStateFromNodes(randomSubsetOf(nodes)), current);
service.connectToAddedNodes(event);
assertConnected(event.nodesDelta().addedNodes());
service.disconnectFromRemovedNodes(event);
assertConnectedExactlyToNodes(event.state());
}
public void testReconnect() {
List<DiscoveryNode> nodes = generateNodes();
NodeConnectionsService service = new NodeConnectionsService(Settings.EMPTY, THREAD_POOL, transportService);
ClusterState current = clusterStateFromNodes(Collections.emptyList());
ClusterChangedEvent event = new ClusterChangedEvent("test", clusterStateFromNodes(randomSubsetOf(nodes)), current);
transport.randomConnectionExceptions = true;
service.connectToAddedNodes(event);
for (int i = 0; i < 3; i++) {
// simulate disconnects
for (DiscoveryNode node : randomSubsetOf(nodes)) {
transport.disconnectFromNode(node);
}
service.new ConnectionChecker().run();
}
// disable exceptions so things can be restored
transport.randomConnectionExceptions = false;
service.new ConnectionChecker().run();
assertConnectedExactlyToNodes(event.state());
}
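
The loop above converges because the checker retries failed connections on each pass; once the simulated failures are switched off, a single extra pass restores every node. A standalone plain-JDK sketch of that retry-until-stable idea (names are hypothetical, not the service's API):

import java.util.Arrays;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ThreadLocalRandom;

public class ReconnectSketch {
    static Set<String> connected = ConcurrentHashMap.newKeySet();
    static volatile boolean randomFailures = true;

    static void connect(String node) {
        if (!connected.contains(node) && randomFailures && ThreadLocalRandom.current().nextBoolean()) {
            throw new IllegalStateException("simulated connect failure to " + node);
        }
        connected.add(node);
    }

    static void checkerPass(List<String> wanted) {
        for (String node : wanted) {
            try { connect(node); } catch (IllegalStateException ignored) { /* retry next pass */ }
        }
    }

    public static void main(String[] args) {
        List<String> nodes = Arrays.asList("node_1", "node_2", "node_3");
        for (int i = 0; i < 3; i++) {
            connected.remove(nodes.get(ThreadLocalRandom.current().nextInt(nodes.size()))); // simulate a disconnect
            checkerPass(nodes);
        }
        randomFailures = false; // disable failures so the next pass can restore everything
        checkerPass(nodes);
        if (!connected.containsAll(nodes)) throw new AssertionError("not fully reconnected");
    }
}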
private void assertConnectedExactlyToNodes(ClusterState state) {
assertConnected(state.nodes());
assertThat(transport.connectedNodes.size(), equalTo(state.nodes().size()));
}
private void assertConnected(Iterable<DiscoveryNode> nodes) {
for (DiscoveryNode node : nodes) {
assertTrue("not connected to " + node, transport.connectedNodes.contains(node));
}
}
private void assertNotConnected(Iterable<DiscoveryNode> nodes) {
for (DiscoveryNode node : nodes) {
assertFalse("still connected to " + node, transport.connectedNodes.contains(node));
}
}
@Override
@Before
public void setUp() throws Exception {
super.setUp();
this.transport = new MockTransport();
transportService = new TransportService(transport, THREAD_POOL);
transportService.start();
transportService.acceptIncomingRequests();
}
@Override
@After
public void tearDown() throws Exception {
transportService.stop();
super.tearDown();
}
@AfterClass
public static void stopThreadPool() {
ThreadPool.terminate(THREAD_POOL, 30, TimeUnit.SECONDS);
THREAD_POOL = null;
}
final class MockTransport implements Transport {
Set<DiscoveryNode> connectedNodes = ConcurrentCollections.newConcurrentSet();
volatile boolean randomConnectionExceptions = false;
@Override
public void transportServiceAdapter(TransportServiceAdapter service) {
}
@Override
public BoundTransportAddress boundAddress() {
return null;
}
@Override
public Map<String, BoundTransportAddress> profileBoundAddresses() {
return null;
}
@Override
public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws Exception {
return new TransportAddress[0];
}
@Override
public boolean addressSupported(Class<? extends TransportAddress> address) {
return false;
}
@Override
public boolean nodeConnected(DiscoveryNode node) {
return connectedNodes.contains(node);
}
@Override
public void connectToNode(DiscoveryNode node) throws ConnectTransportException {
if (connectedNodes.contains(node) == false && randomConnectionExceptions && randomBoolean()) {
throw new ConnectTransportException(node, "simulated");
}
connectedNodes.add(node);
}
@Override
public void connectToNodeLight(DiscoveryNode node) throws ConnectTransportException {
}
@Override
public void disconnectFromNode(DiscoveryNode node) {
connectedNodes.remove(node);
}
@Override
public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request,
TransportRequestOptions options) throws IOException, TransportException {
}
@Override
public long serverOpen() {
return 0;
}
@Override
public List<String> getLocalAddresses() {
return null;
}
@Override
public Lifecycle.State lifecycleState() {
return null;
}
@Override
public void addLifecycleListener(LifecycleListener listener) {
}
@Override
public void removeLifecycleListener(LifecycleListener listener) {
}
@Override
public Transport start() {
return null;
}
@Override
public Transport stop() {
return null;
}
@Override
public void close() {
}
}
}


@@ -27,6 +27,7 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateTaskExecutor;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodeService;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
@@ -40,7 +41,6 @@ import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
-import org.elasticsearch.cluster.service.InternalClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.test.ESAllocationTestCase;
@@ -305,7 +305,7 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCase {
            return randomSubsetOf(1, shards.toArray(new ShardRouting[0])).get(0);
        } else {
            return
-               TestShardRouting.newShardRouting(shardRouting.index(), shardRouting.id(), InternalClusterService.generateNodeId(Settings.EMPTY), randomBoolean(), randomFrom(ShardRoutingState.values()));
+               TestShardRouting.newShardRouting(shardRouting.index(), shardRouting.id(), DiscoveryNodeService.generateNodeId(Settings.EMPTY), randomBoolean(), randomFrom(ShardRoutingState.values()));
        }
    }


@@ -47,6 +47,8 @@ public class WildcardExpressionResolverTests extends ESTestCase {
    assertThat(newHashSet(resolver.resolve(context, Arrays.asList("test*"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY")));
    assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testX*"))), equalTo(newHashSet("testXXX", "testXYY")));
    assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testX*", "kuku"))), equalTo(newHashSet("testXXX", "testXYY", "kuku")));
+   assertThat(newHashSet(resolver.resolve(context, Arrays.asList("*"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY", "kuku")));
+   assertThat(newHashSet(resolver.resolve(context, Arrays.asList("*", "-kuku"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY")));
}

public void testConvertWildcardsTests() {
@@ -107,6 +109,18 @@ public class WildcardExpressionResolverTests extends ESTestCase {
    assertThat(newHashSet(resolver.resolve(context, Arrays.asList("*Y*X"))).size(), equalTo(0));
}

+public void testAll() {
+    MetaData.Builder mdBuilder = MetaData.builder()
+            .put(indexBuilder("testXXX"))
+            .put(indexBuilder("testXYY"))
+            .put(indexBuilder("testYYY"));
+    ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build();
+    IndexNameExpressionResolver.WildcardExpressionResolver resolver = new IndexNameExpressionResolver.WildcardExpressionResolver();
+    IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen());
+    assertThat(newHashSet(resolver.resolve(context, Arrays.asList("_all"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY")));
+}
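
A compact, illustrative model of the semantics the new assertions exercise: "*" and "_all" match every index, "testX*" matches by prefix, and a leading "-" subtracts previously matched names. This is a plain-Java sketch under those assumptions, not the resolver's real implementation:

import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.Set;
import java.util.regex.Pattern;

public class WildcardSketch {
    static Set<String> resolve(Set<String> indices, String... expressions) {
        Set<String> result = new LinkedHashSet<>();
        for (String expression : expressions) {
            if (expression.startsWith("-")) {
                result.removeAll(resolve(indices, expression.substring(1)));
            } else if (expression.equals("_all")) {
                result.addAll(indices);
            } else {
                Pattern p = Pattern.compile(expression.replace("*", ".*"));
                boolean matched = false;
                for (String index : indices) {
                    if (p.matcher(index).matches()) { result.add(index); matched = true; }
                }
                if (!matched) result.add(expression); // lenient: keep literal names like "kuku"
            }
        }
        return result;
    }

    public static void main(String[] args) {
        Set<String> indices = new LinkedHashSet<>(Arrays.asList("testXXX", "testXYY", "testYYY", "kuku"));
        System.out.println(resolve(indices, "testX*", "kuku")); // [testXXX, testXYY, kuku]
        System.out.println(resolve(indices, "*", "-kuku"));     // [testXXX, testXYY, testYYY]
    }
}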
private IndexMetaData.Builder indexBuilder(String index) {
    return IndexMetaData.builder(index).settings(settings(Version.CURRENT).put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0));
}


@@ -0,0 +1,824 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster.service;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateTaskConfig;
import org.elasticsearch.cluster.ClusterStateTaskExecutor;
import org.elasticsearch.cluster.ClusterStateTaskListener;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.NodeConnectionsService;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.OperationRouting;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.DummyTransportAddress;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.MockLogAppender;
import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.threadpool.ThreadPool;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.Semaphore;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
public class ClusterServiceTests extends ESTestCase {
static ThreadPool threadPool;
TimedClusterService clusterService;
@BeforeClass
public static void createThreadPool() {
threadPool = new ThreadPool(ClusterServiceTests.class.getName());
}
@AfterClass
public static void stopThreadPool() {
if (threadPool != null) {
threadPool.shutdownNow();
threadPool = null;
}
}
@Before
public void setUp() throws Exception {
super.setUp();
clusterService = createClusterService(true);
}
@After
public void tearDown() throws Exception {
clusterService.close();
super.tearDown();
}
TimedClusterService createClusterService(boolean makeMaster) throws InterruptedException {
TimedClusterService test = new TimedClusterService(Settings.EMPTY, null,
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
threadPool, new ClusterName("ClusterServiceTests"));
test.setLocalNode(new DiscoveryNode("node1", DummyTransportAddress.INSTANCE, Version.CURRENT));
test.setNodeConnectionsService(new NodeConnectionsService(Settings.EMPTY, null, null) {
@Override
public void connectToAddedNodes(ClusterChangedEvent event) {
// skip
}
@Override
public void disconnectFromRemovedNodes(ClusterChangedEvent event) {
// skip
}
});
test.setClusterStatePublisher((event, ackListener) -> {
});
test.start();
CountDownLatch latch = new CountDownLatch(1);
test.submitStateUpdateTask("making a master", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
final DiscoveryNodes nodes = currentState.nodes();
final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(nodes)
.masterNodeId(makeMaster ? nodes.localNodeId() : null);
return ClusterState.builder(currentState).blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).nodes(nodesBuilder).build();
}
@Override
public boolean runOnlyOnMaster() {
return false;
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
latch.countDown();
}
@Override
public void onFailure(String source, Throwable t) {
logger.warn("unexpected exception", t);
fail("unexpected exception" + t);
}
});
latch.await();
return test;
}
public void testTimeoutUpdateTask() throws Exception {
final CountDownLatch block = new CountDownLatch(1);
clusterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
try {
block.await();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
return currentState;
}
@Override
public void onFailure(String source, Throwable t) {
throw new RuntimeException(t);
}
});
final CountDownLatch timedOut = new CountDownLatch(1);
final AtomicBoolean executeCalled = new AtomicBoolean();
clusterService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() {
@Override
public TimeValue timeout() {
return TimeValue.timeValueMillis(2);
}
@Override
public void onFailure(String source, Throwable t) {
timedOut.countDown();
}
@Override
public ClusterState execute(ClusterState currentState) {
executeCalled.set(true);
return currentState;
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
}
});
timedOut.await();
block.countDown();
final CountDownLatch allProcessed = new CountDownLatch(1);
clusterService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() {
@Override
public void onFailure(String source, Throwable t) {
throw new RuntimeException(t);
}
@Override
public ClusterState execute(ClusterState currentState) {
allProcessed.countDown();
return currentState;
}
});
        allProcessed.await(); // submitting and awaiting another task proves that execute() was never called on the timed-out task
assertThat(executeCalled.get(), equalTo(false));
}
public void testMasterAwareExecution() throws Exception {
ClusterService nonMaster = createClusterService(false);
final boolean[] taskFailed = {false};
final CountDownLatch latch1 = new CountDownLatch(1);
nonMaster.submitStateUpdateTask("test", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
latch1.countDown();
return currentState;
}
@Override
public void onFailure(String source, Throwable t) {
taskFailed[0] = true;
latch1.countDown();
}
});
latch1.await();
assertTrue("cluster state update task was executed on a non-master", taskFailed[0]);
taskFailed[0] = true;
final CountDownLatch latch2 = new CountDownLatch(1);
nonMaster.submitStateUpdateTask("test", new ClusterStateUpdateTask() {
@Override
public boolean runOnlyOnMaster() {
return false;
}
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
taskFailed[0] = false;
latch2.countDown();
return currentState;
}
@Override
public void onFailure(String source, Throwable t) {
taskFailed[0] = true;
latch2.countDown();
}
});
latch2.await();
assertFalse("non-master cluster state update task was not executed", taskFailed[0]);
nonMaster.close();
}
/*
* test that a listener throwing an exception while handling a
* notification does not prevent publication notification to the
* executor
*/
public void testClusterStateTaskListenerThrowingExceptionIsOkay() throws InterruptedException {
final CountDownLatch latch = new CountDownLatch(1);
AtomicBoolean published = new AtomicBoolean();
clusterService.submitStateUpdateTask(
"testClusterStateTaskListenerThrowingExceptionIsOkay",
new Object(),
ClusterStateTaskConfig.build(Priority.NORMAL),
new ClusterStateTaskExecutor<Object>() {
@Override
public boolean runOnlyOnMaster() {
return false;
}
@Override
public BatchResult<Object> execute(ClusterState currentState, List<Object> tasks) throws Exception {
ClusterState newClusterState = ClusterState.builder(currentState).build();
return BatchResult.builder().successes(tasks).build(newClusterState);
}
@Override
public void clusterStatePublished(ClusterState newClusterState) {
published.set(true);
latch.countDown();
}
},
new ClusterStateTaskListener() {
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
throw new IllegalStateException(source);
}
@Override
public void onFailure(String source, Throwable t) {
}
}
);
latch.await();
assertTrue(published.get());
}
// test that for a single thread, tasks are executed in the order
// that they are submitted
public void testClusterStateUpdateTasksAreExecutedInOrder() throws BrokenBarrierException, InterruptedException {
class TaskExecutor implements ClusterStateTaskExecutor<Integer> {
List<Integer> tasks = new ArrayList<>();
@Override
public BatchResult<Integer> execute(ClusterState currentState, List<Integer> tasks) throws Exception {
this.tasks.addAll(tasks);
return BatchResult.<Integer>builder().successes(tasks).build(ClusterState.builder(currentState).build());
}
@Override
public boolean runOnlyOnMaster() {
return false;
}
}
int numberOfThreads = randomIntBetween(2, 8);
TaskExecutor[] executors = new TaskExecutor[numberOfThreads];
for (int i = 0; i < numberOfThreads; i++) {
executors[i] = new TaskExecutor();
}
int tasksSubmittedPerThread = randomIntBetween(2, 1024);
CopyOnWriteArrayList<Tuple<String, Throwable>> failures = new CopyOnWriteArrayList<>();
CountDownLatch updateLatch = new CountDownLatch(numberOfThreads * tasksSubmittedPerThread);
ClusterStateTaskListener listener = new ClusterStateTaskListener() {
@Override
public void onFailure(String source, Throwable t) {
logger.error("unexpected failure: [{}]", t, source);
failures.add(new Tuple<>(source, t));
updateLatch.countDown();
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
updateLatch.countDown();
}
};
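        // one extra barrier party for the test thread, which joins both rounds to gate the start and end of submission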
CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);
for (int i = 0; i < numberOfThreads; i++) {
final int index = i;
Thread thread = new Thread(() -> {
try {
barrier.await();
for (int j = 0; j < tasksSubmittedPerThread; j++) {
clusterService.submitStateUpdateTask("[" + index + "][" + j + "]", j,
ClusterStateTaskConfig.build(randomFrom(Priority.values())), executors[index], listener);
}
barrier.await();
} catch (InterruptedException | BrokenBarrierException e) {
throw new AssertionError(e);
}
});
thread.start();
}
// wait for all threads to be ready
barrier.await();
// wait for all threads to finish
barrier.await();
updateLatch.await();
assertThat(failures, empty());
for (int i = 0; i < numberOfThreads; i++) {
assertEquals(tasksSubmittedPerThread, executors[i].tasks.size());
for (int j = 0; j < tasksSubmittedPerThread; j++) {
assertNotNull(executors[i].tasks.get(j));
assertEquals("cluster state update task executed out of order", j, (int) executors[i].tasks.get(j));
}
}
}
public void testClusterStateBatchedUpdates() throws BrokenBarrierException, InterruptedException {
AtomicInteger counter = new AtomicInteger();
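        // a task that may be executed at most once; a second execution trips the IllegalStateException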
class Task {
private AtomicBoolean state = new AtomicBoolean();
public void execute() {
if (!state.compareAndSet(false, true)) {
throw new IllegalStateException();
} else {
counter.incrementAndGet();
}
}
}
int numberOfThreads = randomIntBetween(2, 8);
int tasksSubmittedPerThread = randomIntBetween(1, 1024);
int numberOfExecutors = Math.max(1, numberOfThreads / 4);
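        // each batch that builds a new cluster state acquires a permit that is released only in
        // clusterStatePublished, letting the test wait for all publication callbacks to complete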
final Semaphore semaphore = new Semaphore(numberOfExecutors);
class TaskExecutor implements ClusterStateTaskExecutor<Task> {
private AtomicInteger counter = new AtomicInteger();
private AtomicInteger batches = new AtomicInteger();
private AtomicInteger published = new AtomicInteger();
@Override
public BatchResult<Task> execute(ClusterState currentState, List<Task> tasks) throws Exception {
tasks.forEach(task -> task.execute());
counter.addAndGet(tasks.size());
ClusterState maybeUpdatedClusterState = currentState;
if (randomBoolean()) {
maybeUpdatedClusterState = ClusterState.builder(currentState).build();
batches.incrementAndGet();
semaphore.acquire();
}
return BatchResult.<Task>builder().successes(tasks).build(maybeUpdatedClusterState);
}
@Override
public boolean runOnlyOnMaster() {
return false;
}
@Override
public void clusterStatePublished(ClusterState newClusterState) {
published.incrementAndGet();
semaphore.release();
}
}
ConcurrentMap<String, AtomicInteger> counters = new ConcurrentHashMap<>();
CountDownLatch updateLatch = new CountDownLatch(numberOfThreads * tasksSubmittedPerThread);
ClusterStateTaskListener listener = new ClusterStateTaskListener() {
@Override
public void onFailure(String source, Throwable t) {
assert false;
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
counters.computeIfAbsent(source, key -> new AtomicInteger()).incrementAndGet();
updateLatch.countDown();
}
};
List<TaskExecutor> executors = new ArrayList<>();
for (int i = 0; i < numberOfExecutors; i++) {
executors.add(new TaskExecutor());
}
// randomly assign tasks to executors
List<TaskExecutor> assignments = new ArrayList<>();
for (int i = 0; i < numberOfThreads; i++) {
for (int j = 0; j < tasksSubmittedPerThread; j++) {
assignments.add(randomFrom(executors));
}
}
Map<TaskExecutor, Integer> counts = new HashMap<>();
for (TaskExecutor executor : assignments) {
counts.merge(executor, 1, (previous, one) -> previous + one);
}
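        // as above, the extra barrier party lets the test thread gate the submitter threads through both rounds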
CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);
for (int i = 0; i < numberOfThreads; i++) {
final int index = i;
Thread thread = new Thread(() -> {
try {
barrier.await();
for (int j = 0; j < tasksSubmittedPerThread; j++) {
ClusterStateTaskExecutor<Task> executor = assignments.get(index * tasksSubmittedPerThread + j);
clusterService.submitStateUpdateTask(
Thread.currentThread().getName(),
new Task(),
ClusterStateTaskConfig.build(randomFrom(Priority.values())),
executor,
listener);
}
barrier.await();
} catch (BrokenBarrierException | InterruptedException e) {
throw new AssertionError(e);
}
});
thread.start();
}
// wait for all threads to be ready
barrier.await();
// wait for all threads to finish
barrier.await();
// wait until all the cluster state updates have been processed
updateLatch.await();
// and until all of the publication callbacks have completed
semaphore.acquire(numberOfExecutors);
// assert the number of executed tasks is correct
assertEquals(numberOfThreads * tasksSubmittedPerThread, counter.get());
// assert each executor executed the correct number of tasks
for (TaskExecutor executor : executors) {
if (counts.containsKey(executor)) {
assertEquals((int) counts.get(executor), executor.counter.get());
assertEquals(executor.batches.get(), executor.published.get());
}
}
// assert the correct number of clusterStateProcessed events were triggered
for (Map.Entry<String, AtomicInteger> entry : counters.entrySet()) {
            assertEquals(tasksSubmittedPerThread, entry.getValue().get());
}
}
/**
* Note, this test can only work as long as we have a single thread executor executing the state update tasks!
*/
public void testPrioritizedTasks() throws Exception {
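        // park an IMMEDIATE-priority task on the single update thread so the randomly prioritized tasks
        // below queue up behind it and are then drained strictly in priority order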
BlockingTask block = new BlockingTask(Priority.IMMEDIATE);
clusterService.submitStateUpdateTask("test", block);
int taskCount = randomIntBetween(5, 20);
Priority[] priorities = Priority.values();
// will hold all the tasks in the order in which they were executed
List<PrioritizedTask> tasks = new ArrayList<>(taskCount);
CountDownLatch latch = new CountDownLatch(taskCount);
for (int i = 0; i < taskCount; i++) {
Priority priority = priorities[randomIntBetween(0, priorities.length - 1)];
clusterService.submitStateUpdateTask("test", new PrioritizedTask(priority, latch, tasks));
}
block.release();
latch.await();
Priority prevPriority = null;
for (PrioritizedTask task : tasks) {
if (prevPriority == null) {
prevPriority = task.priority();
} else {
assertThat(task.priority().sameOrAfter(prevPriority), is(true));
}
}
}
@TestLogging("cluster:TRACE") // To ensure that we log cluster state events on TRACE level
public void testClusterStateUpdateLogging() throws Exception {
MockLogAppender mockAppender = new MockLogAppender();
mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test1", "cluster.service", Level.DEBUG,
"*processing [test1]: took [1s] no change in cluster_state"));
mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test2", "cluster.service", Level.TRACE,
"*failed to execute cluster state update in [2s]*"));
mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test3", "cluster.service", Level.DEBUG,
"*processing [test3]: took [3s] done applying updated cluster_state (version: *, uuid: *)"));
Logger rootLogger = Logger.getRootLogger();
rootLogger.addAppender(mockAppender);
try {
final CountDownLatch latch = new CountDownLatch(4);
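            // pin the TimedClusterService clock so that each task's apparent duration exactly matches the log expectations above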
clusterService.currentTimeOverride = System.nanoTime();
clusterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
clusterService.currentTimeOverride += TimeValue.timeValueSeconds(1).nanos();
return currentState;
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
latch.countDown();
}
@Override
public void onFailure(String source, Throwable t) {
fail();
}
});
clusterService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
clusterService.currentTimeOverride += TimeValue.timeValueSeconds(2).nanos();
throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task");
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
fail();
}
@Override
public void onFailure(String source, Throwable t) {
latch.countDown();
}
});
clusterService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
clusterService.currentTimeOverride += TimeValue.timeValueSeconds(3).nanos();
return ClusterState.builder(currentState).incrementVersion().build();
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
latch.countDown();
}
@Override
public void onFailure(String source, Throwable t) {
fail();
}
});
// Additional update task to make sure all previous logging made it to the logger
            // We don't check logging for this one since there is no guarantee that it will occur before our check
clusterService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
return currentState;
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
latch.countDown();
}
@Override
public void onFailure(String source, Throwable t) {
fail();
}
});
latch.await();
} finally {
rootLogger.removeAppender(mockAppender);
}
mockAppender.assertAllExpectationsMatched();
}
@TestLogging("cluster:WARN") // To ensure that we log cluster state events on WARN level
public void testLongClusterStateUpdateLogging() throws Exception {
MockLogAppender mockAppender = new MockLogAppender();
mockAppender.addExpectation(new MockLogAppender.UnseenEventExpectation("test1 shouldn't see because setting is too low",
"cluster.service", Level.WARN, "*cluster state update task [test1] took [*] above the warn threshold of *"));
mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test2", "cluster.service", Level.WARN,
"*cluster state update task [test2] took [32s] above the warn threshold of *"));
mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test3", "cluster.service", Level.WARN,
"*cluster state update task [test3] took [33s] above the warn threshold of *"));
mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test4", "cluster.service", Level.WARN,
"*cluster state update task [test4] took [34s] above the warn threshold of *"));
Logger rootLogger = Logger.getRootLogger();
rootLogger.addAppender(mockAppender);
try {
final CountDownLatch latch = new CountDownLatch(5);
final CountDownLatch processedFirstTask = new CountDownLatch(1);
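            // pin the clock again; test1 stays below the slow-task warn threshold while test2 through test4 advance it well past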
clusterService.currentTimeOverride = System.nanoTime();
clusterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
clusterService.currentTimeOverride += TimeValue.timeValueSeconds(1).nanos();
return currentState;
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
latch.countDown();
processedFirstTask.countDown();
}
@Override
public void onFailure(String source, Throwable t) {
fail();
}
});
processedFirstTask.await();
clusterService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
clusterService.currentTimeOverride += TimeValue.timeValueSeconds(32).nanos();
throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task");
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
fail();
}
@Override
public void onFailure(String source, Throwable t) {
latch.countDown();
}
});
clusterService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
clusterService.currentTimeOverride += TimeValue.timeValueSeconds(33).nanos();
return ClusterState.builder(currentState).incrementVersion().build();
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
latch.countDown();
}
@Override
public void onFailure(String source, Throwable t) {
fail();
}
});
clusterService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
clusterService.currentTimeOverride += TimeValue.timeValueSeconds(34).nanos();
return currentState;
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
latch.countDown();
}
@Override
public void onFailure(String source, Throwable t) {
fail();
}
});
// Additional update task to make sure all previous logging made it to the logger
            // We don't check logging for this one since there is no guarantee that it will occur before our check
clusterService.submitStateUpdateTask("test5", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
return currentState;
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
latch.countDown();
}
@Override
public void onFailure(String source, Throwable t) {
fail();
}
});
latch.await();
} finally {
rootLogger.removeAppender(mockAppender);
}
mockAppender.assertAllExpectationsMatched();
}
private static class BlockingTask extends ClusterStateUpdateTask {
private final CountDownLatch latch = new CountDownLatch(1);
public BlockingTask(Priority priority) {
super(priority);
}
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
latch.await();
return currentState;
}
@Override
public void onFailure(String source, Throwable t) {
}
public void release() {
latch.countDown();
}
}
private static class PrioritizedTask extends ClusterStateUpdateTask {
private final CountDownLatch latch;
private final List<PrioritizedTask> tasks;
private PrioritizedTask(Priority priority, CountDownLatch latch, List<PrioritizedTask> tasks) {
super(priority);
this.latch = latch;
this.tasks = tasks;
}
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
tasks.add(this);
latch.countDown();
return currentState;
}
@Override
public void onFailure(String source, Throwable t) {
latch.countDown();
}
}
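    /**
     * A cluster service whose clock can be frozen and advanced manually, so tests can fake task durations deterministically.
     */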
static class TimedClusterService extends InternalClusterService {
public volatile Long currentTimeOverride = null;
public TimedClusterService(Settings settings, OperationRouting operationRouting, ClusterSettings clusterSettings,
ThreadPool threadPool, ClusterName clusterName) {
super(settings, operationRouting, clusterSettings, threadPool, clusterName);
}
@Override
protected long currentTimeInNanos() {
if (currentTimeOverride != null) {
return currentTimeOverride;
}
return super.currentTimeInNanos();
}
}
}

View File

@@ -177,13 +177,17 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
     }

     private void configureUnicastCluster(int numberOfNodes, @Nullable int[] unicastHostsOrdinals, int minimumMasterNode) throws ExecutionException, InterruptedException {
+        configureUnicastCluster(DEFAULT_SETTINGS, numberOfNodes, unicastHostsOrdinals, minimumMasterNode);
+    }
+
+    private void configureUnicastCluster(Settings settings, int numberOfNodes, @Nullable int[] unicastHostsOrdinals, int minimumMasterNode) throws ExecutionException, InterruptedException {
         if (minimumMasterNode < 0) {
             minimumMasterNode = numberOfNodes / 2 + 1;
         }
         logger.info("---> configured unicast");
         // TODO: Rarely use default settings from some of these
         Settings nodeSettings = Settings.builder()
-                .put(DEFAULT_SETTINGS)
+                .put(settings)
                 .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), minimumMasterNode)
                 .build();
@@ -196,7 +200,6 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
         }
     }

     /**
      * Test that no split brain occurs under partial network partition. See https://github.com/elastic/elasticsearch/issues/2488
      */
@@ -1075,25 +1078,40 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
      * Tests that indices are properly deleted even if there is a master transition in between.
      * Test for https://github.com/elastic/elasticsearch/issues/11665
      */
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/16890")
     public void testIndicesDeleted() throws Exception {
-        configureUnicastCluster(3, null, 2);
+        final Settings settings = Settings.builder()
+                .put(DEFAULT_SETTINGS)
+                .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "0s") // don't wait on isolated data node
+                .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "30s") // wait till cluster state is committed
+                .build();
+        final String idxName = "test";
+        configureUnicastCluster(settings, 3, null, 2);
         InternalTestCluster.Async<List<String>> masterNodes = internalCluster().startMasterOnlyNodesAsync(2);
         InternalTestCluster.Async<String> dataNode = internalCluster().startDataOnlyNodeAsync();
         dataNode.get();
-        masterNodes.get();
+        final List<String> allMasterEligibleNodes = masterNodes.get();
         ensureStableCluster(3);
         assertAcked(prepareCreate("test"));
         ensureYellow();
-        String masterNode1 = internalCluster().getMasterName();
+        final String masterNode1 = internalCluster().getMasterName();
         NetworkPartition networkPartition = new NetworkUnresponsivePartition(masterNode1, dataNode.get(), getRandom());
         internalCluster().setDisruptionScheme(networkPartition);
         networkPartition.startDisrupting();
-        internalCluster().client(masterNode1).admin().indices().prepareDelete("test").setTimeout("1s").get();
+        // We know this will time out due to the partition; we check manually below so as not to proceed until
+        // the delete has been applied to the master node and the master eligible node.
+        internalCluster().client(masterNode1).admin().indices().prepareDelete(idxName).setTimeout("0s").get();
+        // Don't restart the master node until we know the index deletion has taken effect on the master and the master eligible node.
+        assertBusy(() -> {
+            for (String masterNode : allMasterEligibleNodes) {
+                final ClusterState masterState = internalCluster().clusterService(masterNode).state();
+                assertTrue("index not deleted on " + masterNode, masterState.metaData().hasIndex(idxName) == false &&
+                    masterState.status() == ClusterState.ClusterStateStatus.APPLIED);
+            }
+        });
         internalCluster().restartNode(masterNode1, InternalTestCluster.EMPTY_CALLBACK);
         ensureYellow();
-        assertFalse(client().admin().indices().prepareExists("test").get().isExists());
+        assertFalse(client().admin().indices().prepareExists(idxName).get().isExists());
     }

     protected NetworkPartition addRandomPartition() {

View File

@@ -156,10 +156,11 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
         assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
         ensureGreen();
         refresh();
+        Index index = resolveIndex("foo-copy");
         for (IndicesService service : internalCluster().getDataNodeInstances(IndicesService.class)) {
-            if (service.hasIndex("foo-copy")) {
-                IndexShard shard = service.indexServiceSafe("foo-copy").getShardOrNull(0);
+            if (service.hasIndex(index)) {
+                IndexShard shard = service.indexServiceSafe(index).getShardOrNull(0);
                 if (shard.routingEntry().primary()) {
                     assertFalse(shard instanceof ShadowIndexShard);
                 } else {
@@ -201,8 +202,9 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
         IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats(IDX).clear().setTranslog(true).get();
         assertEquals(2, indicesStatsResponse.getIndex(IDX).getPrimaries().getTranslog().estimatedNumberOfOperations());
         assertEquals(2, indicesStatsResponse.getIndex(IDX).getTotal().getTranslog().estimatedNumberOfOperations());
+        Index index = resolveIndex(IDX);
         for (IndicesService service : internalCluster().getInstances(IndicesService.class)) {
-            IndexService indexService = service.indexService(IDX);
+            IndexService indexService = service.indexService(index);
             if (indexService != null) {
                 IndexShard shard = indexService.getShard(0);
                 TranslogStats translogStats = shard.translogStats();

View File

@@ -168,7 +168,7 @@ public abstract class AbstractFieldDataTestCase extends ESSingleNodeTestCase {
     protected Nested createNested(IndexSearcher searcher, Query parentFilter, Query childFilter) throws IOException {
         BitsetFilterCache s = indexService.cache().bitsetFilterCache();
-        return new Nested(s.getBitSetProducer(parentFilter), searcher.createNormalizedWeight(childFilter, false));
+        return new Nested(s.getBitSetProducer(parentFilter), childFilter);
     }

     public void testEmpty() throws Exception {

View File

@@ -140,6 +140,7 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
     protected static final String DATE_FIELD_NAME = "mapped_date";
     protected static final String OBJECT_FIELD_NAME = "mapped_object";
     protected static final String GEO_POINT_FIELD_NAME = "mapped_geo_point";
+    protected static final String GEO_POINT_FIELD_MAPPING = "type=geo_point,lat_lon=true,geohash=true,geohash_prefix=true";
     protected static final String GEO_SHAPE_FIELD_NAME = "mapped_geo_shape";
     protected static final String[] MAPPED_FIELD_NAMES = new String[] { STRING_FIELD_NAME, INT_FIELD_NAME, DOUBLE_FIELD_NAME,
             BOOLEAN_FIELD_NAME, DATE_FIELD_NAME, OBJECT_FIELD_NAME, GEO_POINT_FIELD_NAME, GEO_SHAPE_FIELD_NAME };
@@ -300,7 +301,7 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
                 BOOLEAN_FIELD_NAME, "type=boolean",
                 DATE_FIELD_NAME, "type=date",
                 OBJECT_FIELD_NAME, "type=object",
-                GEO_POINT_FIELD_NAME, "type=geo_point,lat_lon=true,geohash=true,geohash_prefix=true",
+                GEO_POINT_FIELD_NAME, GEO_POINT_FIELD_MAPPING,
                 GEO_SHAPE_FIELD_NAME, "type=geo_shape"
         ).string()), MapperService.MergeReason.MAPPING_UPDATE, false);
         // also add mappings for two inner fields in the object field

View File

@@ -24,10 +24,12 @@ import org.apache.lucene.spatial.geopoint.search.GeoPointDistanceRangeQuery;
 import org.apache.lucene.spatial.util.GeoDistanceUtils;
 import org.apache.lucene.util.NumericUtils;
 import org.elasticsearch.Version;
+import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.geo.GeoDistance;
 import org.elasticsearch.common.geo.GeoPoint;
 import org.elasticsearch.common.geo.GeoUtils;
 import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.search.geo.GeoDistanceRangeQuery;
 import org.elasticsearch.test.geo.RandomGeoGenerator;
@@ -296,6 +298,36 @@ public class GeoDistanceRangeQueryTests extends AbstractQueryTestCase<GeoDistanceRangeQueryBuilder> {
         }
     }

+    public void testNestedRangeQuery() throws IOException {
+        // create a nested geo_point type with a subfield named "geohash" (explicit testing for ISSUE #15179)
+        MapperService mapperService = queryShardContext().getMapperService();
+        String nestedMapping =
+                "{\"nested_doc\" : {\"properties\" : {" +
+                "\"locations\": {\"properties\": {" +
+                "\"geohash\": {\"type\": \"geo_point\"}}," +
+                "\"type\": \"nested\"}" +
+                "}}}";
+        mapperService.merge("nested_doc", new CompressedXContent(nestedMapping), MapperService.MergeReason.MAPPING_UPDATE, false);
+        // create a range query on the nested locations.geohash sub-field
+        String queryJson =
+                "{\n" +
+                "  \"nested\": {\n" +
+                "    \"path\": \"locations\",\n" +
+                "    \"query\": {\n" +
+                "      \"geo_distance_range\": {\n" +
+                "        \"from\": \"0.0km\",\n" +
+                "        \"to\" : \"200.0km\",\n" +
+                "        \"locations.geohash\": \"s7ws01wyd7ws\"\n" +
+                "      }\n" +
+                "    }\n" +
+                "  }\n" +
+                "}\n";
+        NestedQueryBuilder builder = (NestedQueryBuilder) parseQuery(queryJson);
+        QueryShardContext context = createShardContext();
+        builder.toQuery(context);
+    }
+
     public void testFromJson() throws IOException {
         String json =
                 "{\n" +

View File

@@ -63,7 +63,7 @@ public class GeohashCellQueryBuilderTests extends AbstractQueryTestCase<Builder>
         assertThat(query, instanceOf(TermQuery.class));
         TermQuery termQuery = (TermQuery) query;
         Term term = termQuery.getTerm();
-        assertThat(term.field(), equalTo(queryBuilder.fieldName() + GeoPointFieldMapper.Names.GEOHASH_SUFFIX));
+        assertThat(term.field(), equalTo(queryBuilder.fieldName() + "." + GeoPointFieldMapper.Names.GEOHASH));
         String geohash = queryBuilder.geohash();
         if (queryBuilder.precision() != null) {
             int len = Math.min(queryBuilder.precision(), geohash.length());

View File

@@ -52,6 +52,7 @@ public class NestedQueryBuilderTests extends AbstractQueryTestCase<NestedQueryBuilder> {
                 BOOLEAN_FIELD_NAME, "type=boolean",
                 DATE_FIELD_NAME, "type=date",
                 OBJECT_FIELD_NAME, "type=object",
+                GEO_POINT_FIELD_NAME, GEO_POINT_FIELD_MAPPING,
                 "nested1", "type=nested"
         ).string()), MapperService.MergeReason.MAPPING_UPDATE, false);
     }

View File

@@ -68,7 +68,7 @@ public class CustomQueryParserIT extends ESIntegTestCase {
     private static QueryShardContext queryShardContext() {
         IndicesService indicesService = internalCluster().getDataNodeInstance(IndicesService.class);
-        return indicesService.indexServiceSafe("index").newQueryShardContext();
+        return indicesService.indexServiceSafe(resolveIndex("index")).newQueryShardContext();
     }

     //see #11120

View File

@ -193,7 +193,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
ensureGreen(); ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class); NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class);
IndexService test = indicesService.indexService("test"); IndexService test = indicesService.indexService(resolveIndex("test"));
IndexShard shard = test.getShardOrNull(0); IndexShard shard = test.getShardOrNull(0);
ShardStateMetaData shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId)); ShardStateMetaData shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId));
assertEquals(getShardStateMetadata(shard), shardStateMetaData); assertEquals(getShardStateMetadata(shard), shardStateMetaData);
@ -226,7 +226,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
ensureGreen(); ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class); NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class);
IndexService test = indicesService.indexService("test"); IndexService test = indicesService.indexService(resolveIndex("test"));
IndexShard shard = test.getShardOrNull(0); IndexShard shard = test.getShardOrNull(0);
// fail shard // fail shard
shard.failShard("test shard fail", new CorruptIndexException("", "")); shard.failShard("test shard fail", new CorruptIndexException("", ""));
@ -281,7 +281,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)).get()); assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)).get());
ensureGreen("test"); ensureGreen("test");
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService indexService = indicesService.indexServiceSafe("test"); IndexService indexService = indicesService.indexServiceSafe(resolveIndex("test"));
IndexShard indexShard = indexService.getShardOrNull(0); IndexShard indexShard = indexService.getShardOrNull(0);
client().admin().indices().prepareDelete("test").get(); client().admin().indices().prepareDelete("test").get();
assertThat(indexShard.getActiveOperationsCount(), equalTo(0)); assertThat(indexShard.getActiveOperationsCount(), equalTo(0));
@ -303,7 +303,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)).get()); assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)).get());
ensureGreen("test"); ensureGreen("test");
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService indexService = indicesService.indexServiceSafe("test"); IndexService indexService = indicesService.indexServiceSafe(resolveIndex("test"));
IndexShard indexShard = indexService.getShardOrNull(0); IndexShard indexShard = indexService.getShardOrNull(0);
assertEquals(0, indexShard.getActiveOperationsCount()); assertEquals(0, indexShard.getActiveOperationsCount());
Releasable operation1 = indexShard.acquirePrimaryOperationLock(); Releasable operation1 = indexShard.acquirePrimaryOperationLock();
@ -320,11 +320,11 @@ public class IndexShardTests extends ESSingleNodeTestCase {
client().prepareIndex("test", "test").setSource("{}").get(); client().prepareIndex("test", "test").setSource("{}").get();
ensureGreen("test"); ensureGreen("test");
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
indicesService.indexService("test").getShardOrNull(0).checkIdle(0); indicesService.indexService(resolveIndex("test")).getShardOrNull(0).checkIdle(0);
assertBusy(() -> { assertBusy(() -> {
IndexStats indexStats = client().admin().indices().prepareStats("test").clear().get().getIndex("test"); IndexStats indexStats = client().admin().indices().prepareStats("test").clear().get().getIndex("test");
assertNotNull(indexStats.getShards()[0].getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); assertNotNull(indexStats.getShards()[0].getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
indicesService.indexService("test").getShardOrNull(0).checkIdle(0); indicesService.indexService(resolveIndex("test")).getShardOrNull(0).checkIdle(0);
} }
); );
IndexStats indexStats = client().admin().indices().prepareStats("test").get().getIndex("test"); IndexStats indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
@ -345,7 +345,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
ensureGreen(); ensureGreen();
client().prepareIndex("test", "bar", "1").setSource("{}").get(); client().prepareIndex("test", "bar", "1").setSource("{}").get();
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService("test"); IndexService test = indicesService.indexService(resolveIndex("test"));
IndexShard shard = test.getShardOrNull(0); IndexShard shard = test.getShardOrNull(0);
setDurability(shard, Translog.Durability.REQUEST); setDurability(shard, Translog.Durability.REQUEST);
assertFalse(shard.getEngine().getTranslog().syncNeeded()); assertFalse(shard.getEngine().getTranslog().syncNeeded());
@ -385,7 +385,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
client().prepareIndex("test", "test").setSource("{}").get(); client().prepareIndex("test", "test").setSource("{}").get();
ensureGreen("test"); ensureGreen("test");
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexShard test = indicesService.indexService("test").getShardOrNull(0); IndexShard test = indicesService.indexService(resolveIndex("test")).getShardOrNull(0);
assertEquals(versionCreated.luceneVersion, test.minimumCompatibleVersion()); assertEquals(versionCreated.luceneVersion, test.minimumCompatibleVersion());
client().prepareIndex("test", "test").setSource("{}").get(); client().prepareIndex("test", "test").setSource("{}").get();
assertEquals(versionCreated.luceneVersion, test.minimumCompatibleVersion()); assertEquals(versionCreated.luceneVersion, test.minimumCompatibleVersion());
@ -396,7 +396,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
public void testUpdatePriority() { public void testUpdatePriority() {
assertAcked(client().admin().indices().prepareCreate("test") assertAcked(client().admin().indices().prepareCreate("test")
.setSettings(IndexMetaData.SETTING_PRIORITY, 200)); .setSettings(IndexMetaData.SETTING_PRIORITY, 200));
IndexService indexService = getInstanceFromNode(IndicesService.class).indexService("test"); IndexService indexService = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test"));
assertEquals(200, indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue()); assertEquals(200, indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue());
client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_PRIORITY, 400).build()).get(); client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_PRIORITY, 400).build()).get();
assertEquals(400, indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue()); assertEquals(400, indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue());
@ -410,7 +410,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
SearchResponse response = client().prepareSearch("test").get(); SearchResponse response = client().prepareSearch("test").get();
assertHitCount(response, 1L); assertHitCount(response, 1L);
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService("test"); IndexService test = indicesService.indexService(resolveIndex("test"));
IndexShard shard = test.getShardOrNull(0); IndexShard shard = test.getShardOrNull(0);
ShardPath shardPath = shard.shardPath(); ShardPath shardPath = shard.shardPath();
Path dataPath = shardPath.getDataPath(); Path dataPath = shardPath.getDataPath();
@ -530,7 +530,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
createIndex("test"); createIndex("test");
ensureGreen(); ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService("test"); IndexService test = indicesService.indexService(resolveIndex("test"));
IndexShard shard = test.getShardOrNull(0); IndexShard shard = test.getShardOrNull(0);
ShardStats stats = new ShardStats(shard.routingEntry(), shard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), shard, new CommonStatsFlags()), shard.commitStats()); ShardStats stats = new ShardStats(shard.routingEntry(), shard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), shard, new CommonStatsFlags()), shard.commitStats());
assertEquals(shard.shardPath().getRootDataPath().toString(), stats.getDataPath()); assertEquals(shard.shardPath().getRootDataPath().toString(), stats.getDataPath());
@ -570,7 +570,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
ensureGreen(); ensureGreen();
client().prepareIndex("test_iol", "test", "0").setSource("{\"foo\" : \"bar\"}").setRefresh(true).get(); client().prepareIndex("test_iol", "test", "0").setSource("{\"foo\" : \"bar\"}").setRefresh(true).get();
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService("test_iol"); IndexService test = indicesService.indexService(resolveIndex("test_iol"));
IndexShard shard = test.getShardOrNull(0); IndexShard shard = test.getShardOrNull(0);
AtomicInteger preIndex = new AtomicInteger(); AtomicInteger preIndex = new AtomicInteger();
AtomicInteger postIndex = new AtomicInteger(); AtomicInteger postIndex = new AtomicInteger();
@ -669,7 +669,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
createIndex("test", settingsBuilder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.REQUEST).build()); createIndex("test", settingsBuilder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.REQUEST).build());
ensureGreen(); ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService("test"); IndexService test = indicesService.indexService(resolveIndex("test"));
IndexShard shard = test.getShardOrNull(0); IndexShard shard = test.getShardOrNull(0);
assertFalse(shard.shouldFlush()); assertFalse(shard.shouldFlush());
client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(133 /* size of the operation + header&footer*/, ByteSizeUnit.BYTES)).build()).get(); client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(133 /* size of the operation + header&footer*/, ByteSizeUnit.BYTES)).build()).get();
@ -703,7 +703,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
createIndex("test"); createIndex("test");
ensureGreen(); ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService("test"); IndexService test = indicesService.indexService(resolveIndex("test"));
final IndexShard shard = test.getShardOrNull(0); final IndexShard shard = test.getShardOrNull(0);
assertFalse(shard.shouldFlush()); assertFalse(shard.shouldFlush());
client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(133/* size of the operation + header&footer*/, ByteSizeUnit.BYTES)).build()).get(); client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(133/* size of the operation + header&footer*/, ByteSizeUnit.BYTES)).build()).get();
@ -749,7 +749,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
).get()); ).get());
ensureGreen(); ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService("test"); IndexService test = indicesService.indexService(resolveIndex("test"));
final IndexShard shard = test.getShardOrNull(0); final IndexShard shard = test.getShardOrNull(0);
CountDownLatch latch = new CountDownLatch(1); CountDownLatch latch = new CountDownLatch(1);
Thread recoveryThread = new Thread(() -> { Thread recoveryThread = new Thread(() -> {
@ -779,7 +779,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
).get()); ).get());
ensureGreen(); ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService("test"); IndexService test = indicesService.indexService(resolveIndex("test"));
final IndexShard shard = test.getShardOrNull(0); final IndexShard shard = test.getShardOrNull(0);
final int numThreads = randomIntBetween(2, 4); final int numThreads = randomIntBetween(2, 4);
Thread[] indexThreads = new Thread[numThreads]; Thread[] indexThreads = new Thread[numThreads];
@ -830,7 +830,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
createIndex("test"); createIndex("test");
ensureGreen(); ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService("test"); IndexService test = indicesService.indexService(resolveIndex("test"));
final IndexShard shard = test.getShardOrNull(0); final IndexShard shard = test.getShardOrNull(0);
int translogOps = 1; int translogOps = 1;
client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(randomBoolean()).get(); client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(randomBoolean()).get();
@ -861,7 +861,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
createIndex("test"); createIndex("test");
ensureGreen(); ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService("test"); IndexService test = indicesService.indexService(resolveIndex("test"));
final IndexShard shard = test.getShardOrNull(0); final IndexShard shard = test.getShardOrNull(0);
client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(randomBoolean()).get(); client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(randomBoolean()).get();
if (randomBoolean()) { if (randomBoolean()) {
@ -892,7 +892,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
ensureGreen(); ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
DiscoveryNode localNode = new DiscoveryNode("foo", DummyTransportAddress.INSTANCE, Version.CURRENT); DiscoveryNode localNode = new DiscoveryNode("foo", DummyTransportAddress.INSTANCE, Version.CURRENT);
IndexService test = indicesService.indexService("test"); IndexService test = indicesService.indexService(resolveIndex("test"));
final IndexShard shard = test.getShardOrNull(0); final IndexShard shard = test.getShardOrNull(0);
client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(randomBoolean()).get(); client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(randomBoolean()).get();
@ -945,7 +945,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
createIndex("test"); createIndex("test");
ensureGreen(); ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService("test"); IndexService test = indicesService.indexService(resolveIndex("test"));
final IndexShard shard = test.getShardOrNull(0); final IndexShard shard = test.getShardOrNull(0);
ShardRouting origRouting = shard.routingEntry(); ShardRouting origRouting = shard.routingEntry();
assertThat(shard.state(), equalTo(IndexShardState.STARTED)); assertThat(shard.state(), equalTo(IndexShardState.STARTED));
@ -967,8 +967,8 @@ public class IndexShardTests extends ESSingleNodeTestCase {
createIndex("test_target"); createIndex("test_target");
ensureGreen(); ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService("test"); IndexService test = indicesService.indexService(resolveIndex("test"));
IndexService test_target = indicesService.indexService("test_target"); IndexService test_target = indicesService.indexService(resolveIndex("test_target"));
final IndexShard test_shard = test.getShardOrNull(0); final IndexShard test_shard = test.getShardOrNull(0);
client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(randomBoolean()).get(); client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(randomBoolean()).get();
@ -1029,7 +1029,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
createIndex("test"); createIndex("test");
ensureGreen(); ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService indexService = indicesService.indexService("test"); IndexService indexService = indicesService.indexService(resolveIndex("test"));
IndexShard shard = indexService.getShardOrNull(0); IndexShard shard = indexService.getShardOrNull(0);
client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}").setRefresh(true).get(); client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}").setRefresh(true).get();
client().prepareIndex("test", "test", "1").setSource("{\"foobar\" : \"bar\"}").setRefresh(true).get(); client().prepareIndex("test", "test", "1").setSource("{\"foobar\" : \"bar\"}").setRefresh(true).get();
@ -1078,7 +1078,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
createIndex("test"); createIndex("test");
ensureGreen(); ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService indexService = indicesService.indexService("test"); IndexService indexService = indicesService.indexService(resolveIndex("test"));
IndexShard shard = indexService.getShardOrNull(0); IndexShard shard = indexService.getShardOrNull(0);
client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}").setRefresh(true).get(); client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}").setRefresh(true).get();
client().prepareIndex("test", "test", "1").setSource("{\"foobar\" : \"bar\"}").setRefresh(true).get(); client().prepareIndex("test", "test", "1").setSource("{\"foobar\" : \"bar\"}").setRefresh(true).get();
@ -1126,7 +1126,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
createIndex("test"); createIndex("test");
ensureGreen(); ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService indexService = indicesService.indexService("test"); IndexService indexService = indicesService.indexService(resolveIndex("test"));
IndexShard shard = indexService.getShardOrNull(0); IndexShard shard = indexService.getShardOrNull(0);
client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}").setRefresh(true).get(); client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}").setRefresh(true).get();
IndexSearcherWrapper wrapper = new IndexSearcherWrapper() { IndexSearcherWrapper wrapper = new IndexSearcherWrapper() {
@ -1179,7 +1179,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
.endObject().endObject().endObject()).get(); .endObject().endObject().endObject()).get();
ensureGreen(); ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService("testindexfortranslogsync"); IndexService test = indicesService.indexService(resolveIndex("testindexfortranslogsync"));
IndexShard shard = test.getShardOrNull(0); IndexShard shard = test.getShardOrNull(0);
ShardRouting routing = new ShardRouting(shard.routingEntry()); ShardRouting routing = new ShardRouting(shard.routingEntry());
test.removeShard(0, "b/c britta says so"); test.removeShard(0, "b/c britta says so");
@@ -1206,7 +1206,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
                 .endObject().endObject().endObject()).get();
         ensureGreen();
         IndicesService indicesService = getInstanceFromNode(IndicesService.class);
-        IndexService test = indicesService.indexService("index");
+        IndexService test = indicesService.indexService(resolveIndex("index"));
         IndexShard shard = test.getShardOrNull(0);
         ShardRouting routing = new ShardRouting(shard.routingEntry());
         test.removeShard(0, "b/c britta says so");
@@ -1235,7 +1235,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
                 .endObject().endObject().endObject()).get();
         ensureGreen();
         IndicesService indicesService = getInstanceFromNode(IndicesService.class);
-        IndexService test = indicesService.indexService("index");
+        IndexService test = indicesService.indexService(resolveIndex("index"));
         IndexShard shard = test.getShardOrNull(0);
         ShardRouting routing = new ShardRouting(shard.routingEntry());
         test.removeShard(0, "b/c britta says so");


@@ -161,7 +161,7 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase {
     public void testShardAdditionAndRemoval() {
         createIndex("test", Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 3).put(SETTING_NUMBER_OF_REPLICAS, 0).build());
         IndicesService indicesService = getInstanceFromNode(IndicesService.class);
-        IndexService test = indicesService.indexService("test");
+        IndexService test = indicesService.indexService(resolveIndex("test"));
         MockController controller = new MockController(Settings.builder()
             .put(IndexingMemoryController.INDEX_BUFFER_SIZE_SETTING, "4mb").build());
@@ -194,7 +194,7 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase {
         createIndex("test", Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 2).put(SETTING_NUMBER_OF_REPLICAS, 0).build());
         IndicesService indicesService = getInstanceFromNode(IndicesService.class);
-        IndexService test = indicesService.indexService("test");
+        IndexService test = indicesService.indexService(resolveIndex("test"));
         MockController controller = new MockController(Settings.builder()
             .put(IndexingMemoryController.INDEX_BUFFER_SIZE_SETTING, "5mb")
@@ -248,7 +248,7 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase {
     public void testThrottling() throws Exception {
         createIndex("test", Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 3).put(SETTING_NUMBER_OF_REPLICAS, 0).build());
         IndicesService indicesService = getInstanceFromNode(IndicesService.class);
-        IndexService test = indicesService.indexService("test");
+        IndexService test = indicesService.indexService(resolveIndex("test"));
         MockController controller = new MockController(Settings.builder()
             .put(IndexingMemoryController.INDEX_BUFFER_SIZE_SETTING, "4mb").build());
@@ -316,7 +316,7 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase {
         ensureGreen();
         IndicesService indicesService = getInstanceFromNode(IndicesService.class);
-        IndexService indexService = indicesService.indexService("index");
+        IndexService indexService = indicesService.indexService(resolveIndex("index"));
         IndexShard shard = indexService.getShardOrNull(0);
         assertNotNull(shard);
@@ -342,7 +342,7 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase {
             @Override
             protected long getIndexBufferRAMBytesUsed(IndexShard shard) {
                 return shard.getIndexBufferRAMBytesUsed();
             }

             @Override
             protected void writeIndexingBufferAsync(IndexShard shard) {

Some files were not shown because too many files have changed in this diff.