diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java
index 1d1249e1551..e92695d61e2 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java
@@ -53,12 +53,18 @@ public class CancelTasksRequest extends BaseTasksRequest<CancelTasksRequest> {
         return super.match(task) && task instanceof CancellableTask;
     }
 
-    public CancelTasksRequest reason(String reason) {
+    /**
+     * Set the reason for canceling the task.
+     */
+    public CancelTasksRequest setReason(String reason) {
         this.reason = reason;
         return this;
     }
 
-    public String reason() {
+    /**
+     * The reason for canceling the task.
+     */
+    public String getReason() {
         return reason;
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java
index b07e540d792..874f230587d 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java
@@ -84,21 +84,21 @@ public class TransportCancelTasksAction extends TransportTasksAction
     protected void processTasks(CancelTasksRequest request, Consumer<CancellableTask> operation) {
-        if (request.taskId().isSet() == false) {
+        if (request.getTaskId().isSet() == false) {
             // we are only checking one task, we can optimize it
-            CancellableTask task = taskManager.getCancellableTask(request.taskId().getId());
+            CancellableTask task = taskManager.getCancellableTask(request.getTaskId().getId());
             if (task != null) {
                 if (request.match(task)) {
                     operation.accept(task);
                 } else {
-                    throw new IllegalArgumentException("task [" + request.taskId() + "] doesn't support this operation");
+                    throw new IllegalArgumentException("task [" + request.getTaskId() + "] doesn't support this operation");
                 }
             } else {
-                if (taskManager.getTask(request.taskId().getId()) != null) {
+                if (taskManager.getTask(request.getTaskId().getId()) != null) {
                     // The task exists, but doesn't support cancellation
-                    throw new IllegalArgumentException("task [" + request.taskId() + "] doesn't support cancellation");
+                    throw new IllegalArgumentException("task [" + request.getTaskId() + "] doesn't support cancellation");
                 } else {
-                    throw new ResourceNotFoundException("task [{}] doesn't support cancellation", request.taskId());
+                    throw new ResourceNotFoundException("task [{}] doesn't support cancellation", request.getTaskId());
                 }
             }
         } else {
@@ -113,14 +113,14 @@ public class TransportCancelTasksAction extends TransportTasksAction
             removeBanOnNodes(cancellableTask, nodes));
-        Set<String> childNodes = taskManager.cancel(cancellableTask, request.reason(), banLock::onTaskFinished);
+        Set<String> childNodes = taskManager.cancel(cancellableTask, request.getReason(), banLock::onTaskFinished);
         if (childNodes != null) {
             if (childNodes.isEmpty()) {
                 logger.trace("cancelling task {} with no children", cancellableTask.getId());
                 return cancellableTask.taskInfo(clusterService.localNode(), false);
             } else {
                 logger.trace("cancelling task {} with children on nodes [{}]", cancellableTask.getId(), childNodes);
-                setBanOnNodes(request.reason(), cancellableTask, childNodes, banLock);
+                setBanOnNodes(request.getReason(), cancellableTask, childNodes, banLock);
                 return cancellableTask.taskInfo(clusterService.localNode(), false);
             }
         } else {
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java
index 6bf8ac3e1ef..3fe743fc36a 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java
@@ -31,31 +31,49 @@ import java.io.IOException;
 public class ListTasksRequest extends BaseTasksRequest<ListTasksRequest> {
 
     private boolean detailed = false;
+    private boolean waitForCompletion = false;
 
     /**
      * Should the detailed task information be returned.
      */
-    public boolean detailed() {
+    public boolean getDetailed() {
         return this.detailed;
     }
 
     /**
      * Should the detailed task information be returned.
      */
-    public ListTasksRequest detailed(boolean detailed) {
+    public ListTasksRequest setDetailed(boolean detailed) {
         this.detailed = detailed;
         return this;
     }
 
+    /**
+     * Should this request wait for all found tasks to complete?
+     */
+    public boolean getWaitForCompletion() {
+        return waitForCompletion;
+    }
+
+    /**
+     * Should this request wait for all found tasks to complete?
+     */
+    public ListTasksRequest setWaitForCompletion(boolean waitForCompletion) {
+        this.waitForCompletion = waitForCompletion;
+        return this;
+    }
+
     @Override
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
         detailed = in.readBoolean();
+        waitForCompletion = in.readBoolean();
     }
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
         out.writeBoolean(detailed);
+        out.writeBoolean(waitForCompletion);
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequestBuilder.java
index 2b462014f43..1385781125a 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequestBuilder.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequestBuilder.java
@@ -35,7 +35,15 @@ public class ListTasksRequestBuilder extends TasksRequestBuilder {
+    private static final TimeValue WAIT_FOR_COMPLETION_POLL = timeValueMillis(100);
+    private static final TimeValue DEFAULT_WAIT_FOR_COMPLETION_TIMEOUT = timeValueSeconds(30);
 
     @Inject
     public TransportListTasksAction(Settings settings, ClusterName clusterName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
@@ -59,7 +67,34 @@ public class TransportListTasksAction extends TransportTasksAction
     protected void processTasks(ListTasksRequest request, Consumer<Task> operation) {
+        if (false == request.getWaitForCompletion()) {
+            super.processTasks(request, operation);
+            return;
+        }
+        // If we should wait for completion then we have to intercept every found task and wait for it to leave the manager.
+        TimeValue timeout = request.getTimeout();
+        if (timeout == null) {
+            timeout = DEFAULT_WAIT_FOR_COMPLETION_TIMEOUT;
+        }
+        long timeoutTime = System.nanoTime() + timeout.nanos();
+        super.processTasks(request, operation.andThen((Task t) -> {
+            while (System.nanoTime() - timeoutTime < 0) {
+                if (taskManager.getTask(t.getId()) == null) {
+                    return;
+                }
+                try {
+                    Thread.sleep(WAIT_FOR_COMPLETION_POLL.millis());
+                } catch (InterruptedException e) {
+                    throw new ElasticsearchException("Interrupted waiting for completion of [{}]", e, t);
+                }
+            }
+            throw new ElasticsearchTimeoutException("Timed out waiting for completion of [{}]", t);
+        }));
     }
 
     @Override
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java
index bc229d72b1b..7bc9f50252a 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java
@@ -77,7 +77,7 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAc
 
     @Override
     protected EmptyResult shardOperation(ClearIndicesCacheRequest request, ShardRouting shardRouting) {
-        IndexService service = indicesService.indexService(shardRouting.getIndexName());
+        IndexService service = indicesService.indexService(shardRouting.index());
         if (service != null) {
             IndexShard shard = service.getShardOrNull(shardRouting.id());
             boolean clearedAtLeastOne = false;
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java
index fd45e22a171..f700a198e2c 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java
@@ -93,7 +93,7 @@ public class TransportIndicesSegmentsAction extends TransportBroadcastByNodeActi
 
     @Override
     protected ShardSegments shardOperation(IndicesSegmentsRequest request, ShardRouting shardRouting) {
-        IndexService indexService = indicesService.indexServiceSafe(shardRouting.getIndexName());
+        IndexService indexService = indicesService.indexServiceSafe(shardRouting.index());
         IndexShard indexShard = indexService.getShard(shardRouting.id());
         return new ShardSegments(indexShard.routingEntry(), indexShard.segments(request.verbose()));
     }
diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java
index 30f6b03a116..f1eeae35e08 100644
--- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java
+++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java
@@ -47,6 +47,7 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.VersionType;
 import org.elasticsearch.index.engine.Engine;
@@ -104,8 +105,9 @@ public class TransportShardBulkAction extends TransportReplicationAction
 shardOperationOnPrimary(MetaData metaData, BulkShardRequest request) {
-        final IndexService indexService = indicesService.indexServiceSafe(request.index());
-        final IndexShard indexShard = indexService.getShard(request.shardId().id());
+        ShardId shardId = request.shardId();
+        final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
+        final IndexShard indexShard = indexService.getShard(shardId.getId());
         long[] preVersions = new long[request.items().length];
         VersionType[] preVersionTypes = new VersionType[request.items().length];
diff --git a/core/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java b/core/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java
index 0139186562c..2a01eb4e1c6 100644
--- a/core/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java
+++ b/core/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java
@@ -32,8 +32,6 @@ import org.elasticsearch.search.SearchShardTarget;
 
 import java.io.IOException;
 
-import static org.elasticsearch.search.SearchShardTarget.readSearchShardTarget;
-
 /**
  * Represents a failure to search on a specific shard.
  */
@@ -106,7 +104,7 @@ public class ShardSearchFailure implements ShardOperationFailedException {
     @Override
     public int shardId() {
         if (shardTarget != null) {
-            return shardTarget.shardId();
+            return shardTarget.shardId().id();
         }
         return -1;
     }
@@ -133,7 +131,7 @@ public class ShardSearchFailure implements ShardOperationFailedException {
     @Override
     public void readFrom(StreamInput in) throws IOException {
         if (in.readBoolean()) {
-            shardTarget = readSearchShardTarget(in);
+            shardTarget = new SearchShardTarget(in);
         }
         reason = in.readString();
         status = RestStatus.readFrom(in);
diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
index 7fc18266816..18a7e5e0705 100644
--- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
+++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
@@ -53,6 +53,7 @@ import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.concurrent.AbstractRunnable;
 import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.engine.VersionConflictEngineException;
 import org.elasticsearch.index.shard.IndexShard;
@@ -372,18 +373,18 @@ public abstract class TransportReplicationAction() {
diff --git a/core/src/main/java/org/elasticsearch/action/support/tasks/BaseTasksRequest.java b/core/src/main/java/org/elasticsearch/action/support/tasks/BaseTasksRequest.java
index f7da48a667b..f1045387259 100644
--- a/core/src/main/java/org/elasticsearch/action/support/tasks/BaseTasksRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/support/tasks/BaseTasksRequest.java
@@ -71,7 +71,7 @@ public class BaseTasksRequest> extends
      * Sets the list of action masks for the actions that should be returned
      */
     @SuppressWarnings("unchecked")
-    public final Request actions(String... actions) {
+    public final Request setActions(String... actions) {
         this.actions = actions;
         return (Request) this;
     }
@@ -79,16 +79,16 @@ public class BaseTasksRequest> extends
     /**
      * Return the list of action masks for the actions that should be returned
      */
-    public String[] actions() {
+    public String[] getActions() {
         return actions;
     }
 
-    public final String[] nodesIds() {
+    public final String[] getNodesIds() {
         return nodesIds;
     }
 
     @SuppressWarnings("unchecked")
-    public final Request nodesIds(String... nodesIds) {
+    public final Request setNodesIds(String... nodesIds) {
         this.nodesIds = nodesIds;
         return (Request) this;
     }
@@ -98,12 +98,12 @@ public class BaseTasksRequest> extends
      *
      * By default tasks with any ids are returned.
      */
-    public TaskId taskId() {
+    public TaskId getTaskId() {
         return taskId;
     }
 
     @SuppressWarnings("unchecked")
-    public final Request taskId(TaskId taskId) {
+    public final Request setTaskId(TaskId taskId) {
         this.taskId = taskId;
         return (Request) this;
     }
@@ -112,29 +112,29 @@ public class BaseTasksRequest> extends
     /**
      * Returns the parent task id that tasks should be filtered by
      */
-    public TaskId parentTaskId() {
+    public TaskId getParentTaskId() {
         return parentTaskId;
     }
 
     @SuppressWarnings("unchecked")
-    public Request parentTaskId(TaskId parentTaskId) {
+    public Request setParentTaskId(TaskId parentTaskId) {
         this.parentTaskId = parentTaskId;
         return (Request) this;
     }
 
-    public TimeValue timeout() {
+    public TimeValue getTimeout() {
         return this.timeout;
     }
 
     @SuppressWarnings("unchecked")
-    public final Request timeout(TimeValue timeout) {
+    public final Request setTimeout(TimeValue timeout) {
         this.timeout = timeout;
         return (Request) this;
     }
 
     @SuppressWarnings("unchecked")
-    public final Request timeout(String timeout) {
+    public final Request setTimeout(String timeout) {
         this.timeout = TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".timeout");
         return (Request) this;
     }
@@ -162,11 +162,11 @@ public class BaseTasksRequest> extends
     }
 
     public boolean match(Task task) {
-        if (actions() != null && actions().length > 0 && Regex.simpleMatch(actions(), task.getAction()) == false) {
+        if (getActions() != null && getActions().length > 0 && Regex.simpleMatch(getActions(), task.getAction()) == false) {
             return false;
         }
-        if (taskId().isSet() == false) {
-            if(taskId().getId() != task.getId()) {
+        if (getTaskId().isSet() == false) {
+            if(getTaskId().getId() != task.getId()) {
                 return false;
             }
         }
diff --git a/core/src/main/java/org/elasticsearch/action/support/tasks/TasksRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/support/tasks/TasksRequestBuilder.java
index a7265ce9998..a510a847c62 100644
--- a/core/src/main/java/org/elasticsearch/action/support/tasks/TasksRequestBuilder.java
+++ b/core/src/main/java/org/elasticsearch/action/support/tasks/TasksRequestBuilder.java
@@ -35,19 +35,19 @@ public class TasksRequestBuilder , Res
 
     @SuppressWarnings("unchecked")
     public final RequestBuilder setNodesIds(String... nodesIds) {
-        request.nodesIds(nodesIds);
+        request.setNodesIds(nodesIds);
         return (RequestBuilder) this;
     }
 
     @SuppressWarnings("unchecked")
     public final RequestBuilder setActions(String... actions) {
-        request.actions(actions);
+        request.setActions(actions);
         return (RequestBuilder) this;
     }
 
     @SuppressWarnings("unchecked")
     public final RequestBuilder setTimeout(TimeValue timeout) {
-        request.timeout(timeout);
+        request.setTimeout(timeout);
         return (RequestBuilder) this;
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java b/core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java
index f10b9f23327..53c0d851997 100644
--- a/core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java
+++ b/core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java
@@ -124,25 +124,25 @@ public abstract class TransportTasksAction<
     }
 
     protected String[] resolveNodes(TasksRequest request, ClusterState clusterState) {
-        if (request.taskId().isSet()) {
-            return clusterState.nodes().resolveNodesIds(request.nodesIds());
+        if (request.getTaskId().isSet()) {
+            return clusterState.nodes().resolveNodesIds(request.getNodesIds());
         } else {
-            return new String[]{request.taskId().getNodeId()};
+            return new String[]{request.getTaskId().getNodeId()};
         }
     }
 
     protected void processTasks(TasksRequest request, Consumer<OperationTask> operation) {
-        if (request.taskId().isSet() == false) {
+        if (request.getTaskId().isSet() == false) {
             // we are only checking one task, we can optimize it
-            Task task = taskManager.getTask(request.taskId().getId());
+            Task task = taskManager.getTask(request.getTaskId().getId());
             if (task != null) {
                 if (request.match(task)) {
                     operation.accept((OperationTask) task);
                 } else {
-                    throw new ResourceNotFoundException("task [{}] doesn't support this operation", request.taskId());
+                    throw new ResourceNotFoundException("task [{}] doesn't support this operation", request.getTaskId());
                 }
             } else {
-                throw new ResourceNotFoundException("task [{}] is missing", request.taskId());
+                throw new ResourceNotFoundException("task [{}] is missing", request.getTaskId());
             }
         } else {
             for (Task task : taskManager.getTasks().values()) {
@@ -224,8 +224,8 @@ public abstract class TransportTasksAction<
             }
         } else {
             TransportRequestOptions.Builder builder = TransportRequestOptions.builder();
-            if (request.timeout() != null) {
-                builder.withTimeout(request.timeout());
+            if (request.getTimeout() != null) {
+                builder.withTimeout(request.getTimeout());
             }
             builder.withCompress(transportCompress());
             for (int i = 0; i < nodesIds.length; i++) {
diff --git a/core/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java b/core/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java
index ccdf934958d..94b0e745a8e 100644
--- a/core/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java
+++ b/core/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java
@@ -75,12 +75,12 @@ public class TransportShardMultiTermsVectorAction extends TransportSingleShardAc
 
     @Override
     protected MultiTermVectorsShardResponse shardOperation(MultiTermVectorsShardRequest request, ShardId shardId) {
-        MultiTermVectorsShardResponse response = new MultiTermVectorsShardResponse();
+        final MultiTermVectorsShardResponse response = new MultiTermVectorsShardResponse();
+        final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
+        final IndexShard indexShard = indexService.getShard(shardId.id());
         for (int i = 0; i < request.locations.size(); i++) {
             TermVectorsRequest termVectorsRequest = request.requests.get(i);
             try {
-                IndexService indexService = indicesService.indexServiceSafe(request.index());
-                IndexShard indexShard = indexService.getShard(shardId.id());
                 TermVectorsResponse termVectorsResponse = TermVectorsService.getTermVectors(indexShard, termVectorsRequest);
                 termVectorsResponse.updateTookInMillis(termVectorsRequest.startTime());
                 response.add(request.locations.get(i), termVectorsResponse);
diff --git a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java
index 0aefa825f2a..75feeb8fbca 100644
--- a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java
+++ b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java
@@ -51,6 +51,7 @@ import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.engine.VersionConflictEngineException;
 import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.indices.IndexAlreadyExistsException;
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -147,8 +148,8 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
 
     @Override
     protected ShardIterator shards(ClusterState clusterState, UpdateRequest request) {
-        if (request.shardId() != -1) {
-            return clusterState.routingTable().index(request.concreteIndex()).shard(request.shardId()).primaryShardIt();
+        if (request.getShardId() != null) {
+            return clusterState.routingTable().index(request.concreteIndex()).shard(request.getShardId().getId()).primaryShardIt();
         }
         ShardIterator shardIterator = clusterService.operationRouting()
                 .indexShards(clusterState, request.concreteIndex(), request.type(), request.id(), request.routing());
@@ -167,8 +168,9 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
     }
 
     protected void shardOperation(final UpdateRequest request, final ActionListener<UpdateResponse> listener, final int retryCount) {
-        final IndexService indexService = indicesService.indexServiceSafe(request.concreteIndex());
-        final IndexShard indexShard = indexService.getShard(request.shardId());
+        final ShardId shardId = request.getShardId();
+        final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
+        final IndexShard indexShard = indexService.getShard(shardId.getId());
         final UpdateHelper.Result result = updateHelper.prepare(request, indexShard);
         switch (result.operation()) {
             case UPSERT:
@@ -194,7 +196,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
                 if (e instanceof VersionConflictEngineException) {
                     if (retryCount < request.retryOnConflict()) {
                         logger.trace("Retry attempt [{}] of [{}] on version conflict on [{}][{}][{}]",
-                                retryCount + 1, request.retryOnConflict(), request.index(), request.shardId(), request.id());
+                                retryCount + 1, request.retryOnConflict(), request.index(), request.getShardId(), request.id());
                         threadPool.executor(executor()).execute(new ActionRunnable(listener) {
                             @Override
                             protected void doRun() {
@@ -267,9 +269,9 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
                 break;
             case NONE:
                 UpdateResponse update = result.action();
-                IndexService indexServiceOrNull = indicesService.indexService(request.concreteIndex());
+                IndexService indexServiceOrNull = indicesService.indexService(shardId.getIndex());
                 if (indexServiceOrNull != null) {
-                    IndexShard shard = indexService.getShardOrNull(request.shardId());
+                    IndexShard shard = indexService.getShardOrNull(shardId.getId());
                     if (shard != null) {
                         shard.noopUpdate(request.type());
                     }
diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java
index 6bc69ed4d9c..14c127c0703 100644
--- a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java
@@ -36,6 +36,7 @@ import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.VersionType;
+import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.script.Script;
 import org.elasticsearch.script.ScriptParameterParser;
 import org.elasticsearch.script.ScriptParameterParser.ScriptParameterValue;
@@ -88,7 +89,7 @@ public class UpdateRequest extends InstanceShardOperationRequest
     }
 
     public UpdateRequest(String index, String type, String id) {
-        this.index = index;
+        super(index);
         this.type = type;
         this.id = id;
     }
@@ -195,7 +196,7 @@ public class UpdateRequest extends InstanceShardOperationRequest
         return parent;
     }
 
-    int shardId() {
+    public ShardId getShardId() {
         return this.shardId;
     }
diff --git a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java
index 6ac3c477fd7..433dd4498a4 100644
--- a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java
+++ b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java
@@ -225,7 +225,7 @@ final class BootstrapCheck {
 
     static class MaxNumberOfThreadsCheck implements Check {
 
-        private final long maxNumberOfThreadsThreshold = 1 << 15;
+        private final long maxNumberOfThreadsThreshold = 1 << 11;
 
         @Override
         public boolean check() {
diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java b/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java
index e851b7814da..c8a7924ba0f 100644
--- a/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java
+++ b/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java
@@ -23,6 +23,7 @@ import com.carrotsearch.hppc.cursors.ObjectCursor;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.index.Index;
 
 import java.util.ArrayList;
 import java.util.Collections;
@@ -120,7 +121,7 @@ public class ClusterChangedEvent {
     /**
      * Returns the indices deleted in this event
      */
-    public List<String> indicesDeleted() {
+    public List<Index> indicesDeleted() {
         // If the new cluster state has a new cluster UUID, the likely scenario is that a node was elected
         // master that has had its data directory wiped out, in which case we don't want to delete the indices and lose data;
         // rather we want to import them as dangling indices instead. So we check here if the cluster UUID differs from the previous
@@ -131,17 +132,18 @@ public class ClusterChangedEvent {
         if (metaDataChanged() == false || isNewCluster()) {
             return Collections.emptyList();
         }
-        List<String> deleted = null;
-        for (ObjectCursor<String> cursor : previousState.metaData().indices().keys()) {
-            String index = cursor.value;
-            if (!state.metaData().hasIndex(index)) {
+        List<Index> deleted = null;
+        for (ObjectCursor<IndexMetaData> cursor : previousState.metaData().indices().values()) {
+            IndexMetaData index = cursor.value;
+            IndexMetaData current = state.metaData().index(index.getIndex().getName());
+            if (current == null || index.getIndexUUID().equals(current.getIndexUUID()) == false) {
                 if (deleted == null) {
                     deleted = new ArrayList<>();
                 }
-                deleted.add(index);
+                deleted.add(index.getIndex());
             }
         }
-        return deleted == null ? Collections.<String>emptyList() : deleted;
+        return deleted == null ? Collections.<Index>emptyList() : deleted;
     }
 
     /**
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java
index cca633a7651..9bd4ba6112b 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java
@@ -686,7 +686,7 @@ public class IndexNameExpressionResolver extends AbstractComponent {
     }
 
     private boolean isEmptyOrTrivialWildcard(List<String> expressions) {
-        return expressions.isEmpty() || (expressions.size() == 1 && (MetaData.ALL.equals(expressions.get(0))) || Regex.isMatchAllPattern(expressions.get(0)));
+        return expressions.isEmpty() || (expressions.size() == 1 && (MetaData.ALL.equals(expressions.get(0)) || Regex.isMatchAllPattern(expressions.get(0))));
     }
 
     private List<String> resolveEmptyOrTrivialWildcard(IndicesOptions options, MetaData metaData, boolean assertEmpty) {
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java
index 62f3ad802a0..177c46e5537 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java
@@ -53,6 +53,7 @@ import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.NodeServicesProvider;
 import org.elasticsearch.index.mapper.DocumentMapper;
@@ -188,7 +189,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
 
             @Override
             public ClusterState execute(ClusterState currentState) throws Exception {
-                boolean indexCreated = false;
+                Index createdIndex = null;
                 String removalReason = null;
                 try {
                     validate(request, currentState);
@@ -308,10 +309,9 @@ public class MetaDataCreateIndexService extends AbstractComponent {
                     // Set up everything, now locally create the index to see that things are ok, and apply
                     final IndexMetaData tmpImd = IndexMetaData.builder(request.index()).settings(actualIndexSettings).build();
                     // create the index here (on the master) to validate it can be created, as well as adding the mapping
-                    indicesService.createIndex(nodeServicesProvider, tmpImd, Collections.emptyList());
-                    indexCreated = true;
+                    final IndexService indexService = indicesService.createIndex(nodeServicesProvider, tmpImd, Collections.emptyList());
+                    createdIndex = indexService.index();
                     // now add the mappings
-                    IndexService indexService = indicesService.indexServiceSafe(request.index());
                     MapperService mapperService = indexService.mapperService();
                     // first, add the default mapping
                     if (mappings.containsKey(MapperService.DEFAULT_MAPPING)) {
@@ -415,9 +415,9 @@ public class MetaDataCreateIndexService extends AbstractComponent {
                     removalReason = "cleaning up after validating index on master";
                     return updatedState;
                 } finally {
-                    if (indexCreated) {
+                    if (createdIndex != null) {
                         // Index was already partially created - need to clean up
-                        indicesService.removeIndex(request.index(), removalReason != null ? removalReason : "failed to create index");
+                        indicesService.removeIndex(createdIndex, removalReason != null ? removalReason : "failed to create index");
                     }
                 }
             }
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java
index 52154bd2c04..1f0eaf0cda0 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java
@@ -31,6 +31,7 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexNotFoundException;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.NodeServicesProvider;
@@ -74,7 +75,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
 
             @Override
             public ClusterState execute(final ClusterState currentState) {
-                List<String> indicesToClose = new ArrayList<>();
+                List<Index> indicesToClose = new ArrayList<>();
                 Map<String, IndexService> indices = new HashMap<>();
                 try {
                     for (AliasAction aliasAction : request.actions()) {
@@ -112,7 +113,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
                                 logger.warn("[{}] failed to temporary create in order to apply alias action", e, indexMetaData.getIndex());
                                 continue;
                             }
-                            indicesToClose.add(indexMetaData.getIndex().getName());
+                            indicesToClose.add(indexMetaData.getIndex());
                         }
                         indices.put(indexMetaData.getIndex().getName(), indexService);
                     }
@@ -153,7 +154,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
                     }
                     return currentState;
                 } finally {
-                    for (String index : indicesToClose) {
+                    for (Index index : indicesToClose) {
                         indicesService.removeIndex(index, "created for alias processing");
                     }
                 }
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java
index df26df29800..6639f9bdbd6 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java
@@ -19,12 +19,14 @@
 
 package org.elasticsearch.cluster.metadata;
 
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.admin.indices.close.CloseIndexClusterStateUpdateRequest;
 import org.elasticsearch.action.admin.indices.open.OpenIndexClusterStateUpdateRequest;
 import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
 import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.RestoreInProgress;
 import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
 import org.elasticsearch.cluster.block.ClusterBlock;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
@@ -37,11 +39,14 @@ import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.IndexNotFoundException;
+import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.rest.RestStatus;
 
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Set;
 
 /**
  * Service responsible for submitting open/close index requests
@@ -78,7 +83,7 @@ public class MetaDataIndexStateService extends AbstractComponent {
 
             @Override
             public ClusterState execute(ClusterState currentState) {
-                List<String> indicesToClose = new ArrayList<>();
+                Set<String> indicesToClose = new HashSet<>();
                 for (String index : request.indices()) {
                     IndexMetaData indexMetaData = currentState.metaData().index(index);
                     if (indexMetaData == null) {
@@ -94,6 +99,28 @@ public class MetaDataIndexStateService extends AbstractComponent {
                     return currentState;
                 }
 
+                // Check if any of the indices to be closed are currently being restored from a snapshot and fail closing if such an index
+                // is found as closing an index that is being restored makes the index unusable (it cannot be recovered).
+                RestoreInProgress restore = currentState.custom(RestoreInProgress.TYPE);
+                if (restore != null) {
+                    Set<String> indicesToFail = null;
+                    for (RestoreInProgress.Entry entry : restore.entries()) {
+                        for (ObjectObjectCursor<ShardId, RestoreInProgress.ShardRestoreStatus> shard : entry.shards()) {
+                            if (!shard.value.state().completed()) {
+                                if (indicesToClose.contains(shard.key.getIndexName())) {
+                                    if (indicesToFail == null) {
+                                        indicesToFail = new HashSet<>();
+                                    }
+                                    indicesToFail.add(shard.key.getIndexName());
+                                }
+                            }
+                        }
+                    }
+                    if (indicesToFail != null) {
+                        throw new IllegalArgumentException("Cannot close indices that are being restored: " + indicesToFail);
+                    }
+                }
+
                 logger.info("closing indices [{}]", indicesAsString);
                 MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
index c06a5cc7c1c..51095a2d0de 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
@@ -36,6 +36,7 @@ import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexNotFoundException;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.NodeServicesProvider;
@@ -112,13 +113,13 @@ public class MetaDataMappingService extends AbstractComponent {
         MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
         for (Map.Entry> entry : tasksPerIndex.entrySet()) {
-            String index = entry.getKey();
-            IndexMetaData indexMetaData = mdBuilder.get(index);
+            IndexMetaData indexMetaData = mdBuilder.get(entry.getKey());
             if (indexMetaData == null) {
                 // index got deleted on us, ignore...
- logger.debug("[{}] ignoring tasks - index meta data doesn't exist", index); + logger.debug("[{}] ignoring tasks - index meta data doesn't exist", entry.getKey()); continue; } + final Index index = indexMetaData.getIndex(); // the tasks lists to iterate over, filled with the list of mapping tasks, trying to keep // the latest (based on order) update mapping one per node List allIndexTasks = entry.getValue(); @@ -127,7 +128,7 @@ public class MetaDataMappingService extends AbstractComponent { if (indexMetaData.isSameUUID(task.indexUUID)) { hasTaskWithRightUUID = true; } else { - logger.debug("[{}] ignoring task [{}] - index meta data doesn't match task uuid", index, task); + logger.debug("{} ignoring task [{}] - index meta data doesn't match task uuid", index, task); } } if (hasTaskWithRightUUID == false) { @@ -136,7 +137,7 @@ public class MetaDataMappingService extends AbstractComponent { // construct the actual index if needed, and make sure the relevant mappings are there boolean removeIndex = false; - IndexService indexService = indicesService.indexService(index); + IndexService indexService = indicesService.indexService(indexMetaData.getIndex()); if (indexService == null) { // we need to create the index here, and add the current mapping to it, so we can merge indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList()); @@ -208,47 +209,57 @@ public class MetaDataMappingService extends AbstractComponent { class PutMappingExecutor implements ClusterStateTaskExecutor { @Override - public BatchResult execute(ClusterState currentState, List tasks) throws Exception { - Set indicesToClose = new HashSet<>(); + public BatchResult execute(ClusterState currentState, + List tasks) throws Exception { + Set indicesToClose = new HashSet<>(); BatchResult.Builder builder = BatchResult.builder(); try { // precreate incoming indices; for (PutMappingClusterStateUpdateRequest request : tasks) { - // failures here mean something is broken with our cluster state - fail all tasks by letting exceptions bubble up - for (String index : request.indices()) { - final IndexMetaData indexMetaData = currentState.metaData().index(index); - if (indexMetaData != null && indicesService.hasIndex(index) == false) { - // if we don't have the index, we will throw exceptions later; - indicesToClose.add(index); - IndexService indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList()); - // add mappings for all types, we need them for cross-type validation - for (ObjectCursor mapping : indexMetaData.getMappings().values()) { - indexService.mapperService().merge(mapping.value.type(), mapping.value.source(), MapperService.MergeReason.MAPPING_RECOVERY, request.updateAllTypes()); + final List indices = new ArrayList<>(request.indices().length); + try { + for (String index : request.indices()) { + final IndexMetaData indexMetaData = currentState.metaData().index(index); + if (indexMetaData != null) { + if (indicesService.hasIndex(indexMetaData.getIndex()) == false) { + // if the index does not exists we create it once, add all types to the mapper service and + // close it later once we are done with mapping update + indicesToClose.add(indexMetaData.getIndex()); + IndexService indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, + Collections.emptyList()); + // add mappings for all types, we need them for cross-type validation + for (ObjectCursor mapping : indexMetaData.getMappings().values()) { + 
indexService.mapperService().merge(mapping.value.type(), mapping.value.source(), + MapperService.MergeReason.MAPPING_RECOVERY, request.updateAllTypes()); + } + } + indices.add(indexMetaData.getIndex()); + } else { + // we didn't find the index in the clusterstate - maybe it was deleted + // NOTE: this doesn't fail the entire batch only the current PutMapping request we are processing + throw new IndexNotFoundException(index); } } - } - } - for (PutMappingClusterStateUpdateRequest request : tasks) { - try { - currentState = applyRequest(currentState, request); + currentState = applyRequest(currentState, request, indices); builder.success(request); } catch (Throwable t) { builder.failure(request, t); } } - return builder.build(currentState); } finally { - for (String index : indicesToClose) { + for (Index index : indicesToClose) { indicesService.removeIndex(index, "created for mapping processing"); } } } - private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request) throws IOException { + private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request, + List indices) throws IOException { String mappingType = request.type(); CompressedXContent mappingUpdateSource = new CompressedXContent(request.source()); - for (String index : request.indices()) { + final MetaData metaData = currentState.metaData(); + for (Index index : indices) { IndexService indexService = indicesService.indexServiceSafe(index); // try and parse it (no need to add it here) so we can bail early in case of parsing exception DocumentMapper newMapper; @@ -270,7 +281,7 @@ public class MetaDataMappingService extends AbstractComponent { // and a put mapping api call, so we don't which type did exist before. // Also the order of the mappings may be backwards. 
                 if (newMapper.parentFieldMapper().active()) {
-                    IndexMetaData indexMetaData = currentState.metaData().index(index);
+                    IndexMetaData indexMetaData = metaData.index(index);
                     for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
                         if (newMapper.parentFieldMapper().type().equals(mapping.value.type())) {
                             throw new IllegalArgumentException("can't add a _parent field that points to an already existing type");
@@ -290,11 +301,11 @@ public class MetaDataMappingService extends AbstractComponent {
             if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && !PercolatorService.TYPE_NAME.equals(mappingType) && mappingType.charAt(0) == '_') {
                 throw new InvalidTypeNameException("Document mapping type name can't start with '_'");
             }
-            MetaData.Builder builder = MetaData.builder(currentState.metaData());
-            for (String index : request.indices()) {
+            MetaData.Builder builder = MetaData.builder(metaData);
+            for (Index index : indices) {
                 // do the actual merge here on the master, and update the mapping source
                 IndexService indexService = indicesService.indexService(index);
-                if (indexService == null) {
+                if (indexService == null) { // TODO this seems impossible given we use indexServiceSafe above
                     continue;
                 }
@@ -326,7 +337,7 @@ public class MetaDataMappingService extends AbstractComponent {
                     }
                 }
 
-                IndexMetaData indexMetaData = currentState.metaData().index(index);
+                IndexMetaData indexMetaData = metaData.index(index);
                 if (indexMetaData == null) {
                     throw new IndexNotFoundException(index);
                 }
diff --git a/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java
index 39f34ad867e..54e5738e78c 100644
--- a/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java
+++ b/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java
@@ -235,11 +235,7 @@ public class Lucene {
             @Override
             protected Object doBody(String segmentFileName) throws IOException {
                 try (IndexInput input = directory.openInput(segmentFileName, IOContext.READ)) {
-                    final int format = input.readInt();
-                    if (format == CodecUtil.CODEC_MAGIC) {
-                        CodecUtil.checksumEntireFile(input);
-                    }
-                    // legacy....
+                    CodecUtil.checksumEntireFile(input);
                 }
                 return null;
             }
diff --git a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java
index 69ef795812d..b8b75147740 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java
@@ -35,6 +35,7 @@ import org.elasticsearch.index.fielddata.IndexFieldDataService;
 import org.elasticsearch.index.mapper.FieldMapper;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.percolator.PercolatorQueriesRegistry;
+import org.elasticsearch.index.similarity.SimilarityService;
 import org.elasticsearch.index.store.FsDirectoryService;
 import org.elasticsearch.index.store.IndexStore;
 import org.elasticsearch.index.store.Store;
@@ -44,6 +45,7 @@ import org.elasticsearch.indices.IndicesRequestCache;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashSet;
+import java.util.Map;
 import java.util.Set;
 import java.util.function.Predicate;
 
@@ -133,8 +135,15 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
         FsDirectoryService.INDEX_LOCK_FACTOR_SETTING,
         EngineConfig.INDEX_CODEC_SETTING,
         IndexWarmer.INDEX_NORMS_LOADING_SETTING,
-        // this sucks but we can't really validate all the analyzers/similarity in here
-        Setting.groupSetting("index.similarity.", false, Setting.Scope.INDEX), // this allows similarity settings to be passed
+        // validate that built-in similarities don't get redefined
+        Setting.groupSetting("index.similarity.", false, Setting.Scope.INDEX, (s) -> {
+            Map<String, Settings> groups = s.getAsGroups();
+            for (String key : SimilarityService.BUILT_IN.keySet()) {
+                if (groups.containsKey(key)) {
+                    throw new IllegalArgumentException("illegal value for [index.similarity."+ key + "] cannot redefine built-in similarity");
+                }
+            }
+        }), // this allows similarity settings to be passed
         Setting.groupSetting("index.analysis.", false, Setting.Scope.INDEX) // this allows analysis settings to be passed
     )));
 
diff --git a/core/src/main/java/org/elasticsearch/common/settings/Setting.java b/core/src/main/java/org/elasticsearch/common/settings/Setting.java
index 0b4e43744a5..c31b905abbf 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/Setting.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/Setting.java
@@ -20,6 +20,7 @@ package org.elasticsearch.common.settings;
 
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.support.ToXContentToBytes;
 import org.elasticsearch.common.Booleans;
 import org.elasticsearch.common.Strings;
@@ -30,16 +31,19 @@ import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.unit.MemorySizeValue;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Map;
 import java.util.Objects;
 import java.util.function.BiConsumer;
 import java.util.function.Consumer;
 import java.util.function.Function;
+import java.util.function.Predicate;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
 
@@ -177,7 +181,7 @@ public class Setting extends ToXContentToBytes {
     /**
      * Returns true iff this setting is present in the given settings object. Otherwise false
     */
-    public final boolean exists(Settings settings) {
+    public boolean exists(Settings settings) {
         return settings.get(getKey()) != null;
     }
 
@@ -505,17 +509,45 @@ public class Setting extends ToXContentToBytes {
             throw new ElasticsearchException(ex);
         }
     }
-    public static Setting<Settings> groupSetting(String key, boolean dynamic, Scope scope) {
+        return groupSetting(key, dynamic, scope, (s) -> {});
+    }
+    public static Setting<Settings> groupSetting(String key, boolean dynamic, Scope scope, Consumer<Settings> validator) {
         return new Setting<Settings>(new GroupKey(key), (s) -> "", (s) -> null, dynamic, scope) {
             @Override
             public boolean isGroupSetting() {
                 return true;
             }
 
+            @Override
+            public String getRaw(Settings settings) {
+                Settings subSettings = get(settings);
+                try {
+                    XContentBuilder builder = XContentFactory.jsonBuilder();
+                    builder.startObject();
+                    subSettings.toXContent(builder, EMPTY_PARAMS);
+                    builder.endObject();
+                    return builder.string();
+                } catch (IOException e) {
+                    throw new RuntimeException(e);
+                }
+            }
+
             @Override
             public Settings get(Settings settings) {
-                return settings.getByPrefix(getKey());
+                Settings byPrefix = settings.getByPrefix(getKey());
+                validator.accept(byPrefix);
+                return byPrefix;
+            }
+
+            @Override
+            public boolean exists(Settings settings) {
+                for (Map.Entry<String, String> entry : settings.getAsMap().entrySet()) {
+                    if (entry.getKey().startsWith(key)) {
+                        return true;
+                    }
+                }
+                return false;
             }
 
             @Override
diff --git a/core/src/main/java/org/elasticsearch/discovery/Discovery.java b/core/src/main/java/org/elasticsearch/discovery/Discovery.java
index b96417381ff..778e2d15053 100644
--- a/core/src/main/java/org/elasticsearch/discovery/Discovery.java
+++ b/core/src/main/java/org/elasticsearch/discovery/Discovery.java
@@ -26,7 +26,6 @@ import org.elasticsearch.cluster.routing.RoutingService;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.component.LifecycleComponent;
 import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.node.service.NodeService;
 
 import java.io.IOException;
 
@@ -41,11 +40,6 @@ public interface Discovery extends LifecycleComponent {
 
     String nodeDescription();
 
-    /**
-     * Here as a hack to solve dep injection problem...
-     */
-    void setNodeService(@Nullable NodeService nodeService);
-
     /**
      * Another hack to solve dep injection problem..., note, this will be called before
     * any start is called.
diff --git a/core/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java
index 661de5260c1..0462d6a8d8d 100644
--- a/core/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java
+++ b/core/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java
@@ -33,7 +33,6 @@ import org.elasticsearch.cluster.routing.RoutingService;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
 import org.elasticsearch.common.component.AbstractLifecycleComponent;
 import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.inject.internal.Nullable;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.settings.ClusterSettings;
@@ -45,7 +44,6 @@ import org.elasticsearch.discovery.BlockingClusterStatePublishResponseHandler;
 import org.elasticsearch.discovery.Discovery;
 import org.elasticsearch.discovery.DiscoverySettings;
 import org.elasticsearch.discovery.DiscoveryStats;
-import org.elasticsearch.node.service.NodeService;
 
 import java.util.HashSet;
 import java.util.Queue;
@@ -84,11 +82,6 @@ public class LocalDiscovery extends AbstractLifecycleComponent implem
         this.discoverySettings = new DiscoverySettings(settings, clusterSettings);
     }
 
-    @Override
-    public void setNodeService(@Nullable NodeService nodeService) {
-        // nothing to do here
-    }
-
     @Override
     public void setRoutingService(RoutingService routingService) {
         this.routingService = routingService;
diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/DiscoveryNodesProvider.java b/core/src/main/java/org/elasticsearch/discovery/zen/DiscoveryNodesProvider.java
index f845cbe1fed..b9ce7901369 100644
--- a/core/src/main/java/org/elasticsearch/discovery/zen/DiscoveryNodesProvider.java
+++ b/core/src/main/java/org/elasticsearch/discovery/zen/DiscoveryNodesProvider.java
@@ -20,8 +20,6 @@
 package org.elasticsearch.discovery.zen;
 
 import org.elasticsearch.cluster.node.DiscoveryNodes;
-import org.elasticsearch.common.Nullable;
-import org.elasticsearch.node.service.NodeService;
 
 /**
  *
@@ -30,6 +28,4 @@ public interface DiscoveryNodesProvider {
 
     DiscoveryNodes nodes();
 
-    @Nullable
-    NodeService nodeService();
 }
diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java
index fb0f7a61966..c0dd78b4e5f 100644
--- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java
+++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java
@@ -60,7 +60,6 @@ import org.elasticsearch.discovery.zen.ping.ZenPing;
 import org.elasticsearch.discovery.zen.ping.ZenPingService;
 import org.elasticsearch.discovery.zen.publish.PendingClusterStateStats;
 import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction;
-import org.elasticsearch.node.service.NodeService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.EmptyTransportResponseHandler;
 import org.elasticsearch.transport.TransportChannel;
@@ -137,10 +136,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen
     /** counts the time this node has joined the cluster or have elected it self as master */
     private final AtomicLong clusterJoinsCounter = new AtomicLong();
 
-    @Nullable
-    private NodeService nodeService;
-
-    // must initialized in doStart(), when we have the routingService set
     private volatile NodeJoinController nodeJoinController;
 
@@ -192,11 +187,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen
         transportService.registerRequestHandler(DISCOVERY_REJOIN_ACTION_NAME, RejoinClusterRequest::new, ThreadPool.Names.SAME, new RejoinClusterRequestHandler());
     }
 
-    @Override
-    public void setNodeService(@Nullable NodeService nodeService) {
-        this.nodeService = nodeService;
-    }
-
     @Override
     public void setRoutingService(RoutingService routingService) {
         this.routingService = routingService;
@@ -292,11 +282,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen
         return clusterService.state().nodes();
     }
 
-    @Override
-    public NodeService nodeService() {
-        return this.nodeService;
-    }
-
     @Override
     public boolean nodeHasJoinedClusterOnce() {
         return clusterJoinsCounter.get() > 0;
diff --git a/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java b/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java
index e2b6f0d27ed..c94e1370c01 100644
--- a/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java
+++ b/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java
@@ -104,6 +104,8 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
                     && matchingNodes.isNodeMatchBySyncID(nodeWithHighestMatch) == true) {
                     // we found a better match that has a full sync id match, the existing allocation is not fully synced
                     // so we found a better one, cancel this one
+                    logger.debug("cancelling allocation of replica on [{}], sync id match found on node [{}]",
+                        currentNode, nodeWithHighestMatch);
                     it.moveToUnassigned(new UnassignedInfo(UnassignedInfo.Reason.REALLOCATED_REPLICA,
                         "existing allocation of replica to [" + currentNode + "] cancelled, sync id match found on node [" + nodeWithHighestMatch + "]",
                         null, allocation.getCurrentNanoTime(), System.currentTimeMillis()));
diff --git a/core/src/main/java/org/elasticsearch/index/Index.java b/core/src/main/java/org/elasticsearch/index/Index.java
index 80bf3c31b44..983b977d611 100644
--- a/core/src/main/java/org/elasticsearch/index/Index.java
+++ b/core/src/main/java/org/elasticsearch/index/Index.java
@@ -19,6 +19,7 @@
 
 package org.elasticsearch.index;
 
+import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
@@ -50,7 +51,14 @@ public class Index implements Writeable {
 
     @Override
     public String toString() {
-        return "[" + name + "]";
+        /*
+         * If we have a uuid we put it in the toString so it'll show up in logs which is useful as more and more things use the uuid rather
+         * than the name as the lookup key for the index.
+ */ + if (ClusterState.UNKNOWN_UUID.equals(uuid)) { + return "[" + name + "]"; + } + return "[" + name + "/" + uuid + "]"; } @Override diff --git a/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java b/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java index 5452daa7f07..d6fa552b203 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java +++ b/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java @@ -36,6 +36,7 @@ import java.util.concurrent.TimeUnit; /** */ public final class IndexingSlowLog implements IndexingOperationListener { + private final Index index; private boolean reformat; private long indexWarnThreshold; private long indexInfoThreshold; @@ -51,7 +52,6 @@ public final class IndexingSlowLog implements IndexingOperationListener { private SlowLogLevel level; private final ESLogger indexLogger; - private final ESLogger deleteLogger; private static final String INDEX_INDEXING_SLOWLOG_PREFIX = "index.indexing.slowlog"; public static final Setting INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING = Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.warn", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); @@ -75,16 +75,8 @@ public final class IndexingSlowLog implements IndexingOperationListener { }, true, Setting.Scope.INDEX); IndexingSlowLog(IndexSettings indexSettings) { - this(indexSettings, Loggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".index"), - Loggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".delete")); - } - - /** - * Build with the specified loggers. Only used to testing. - */ - IndexingSlowLog(IndexSettings indexSettings, ESLogger indexLogger, ESLogger deleteLogger) { - this.indexLogger = indexLogger; - this.deleteLogger = deleteLogger; + this.indexLogger = Loggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".index", indexSettings.getSettings()); + this.index = indexSettings.getIndex(); indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING, this::setReformat); this.reformat = indexSettings.getValue(INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING); @@ -109,7 +101,6 @@ public final class IndexingSlowLog implements IndexingOperationListener { private void setLevel(SlowLogLevel level) { this.level = level; this.indexLogger.setLevel(level.name()); - this.deleteLogger.setLevel(level.name()); } private void setWarnThreshold(TimeValue warnThreshold) { @@ -141,13 +132,13 @@ public final class IndexingSlowLog implements IndexingOperationListener { private void postIndexing(ParsedDocument doc, long tookInNanos) { if (indexWarnThreshold >= 0 && tookInNanos > indexWarnThreshold) { - indexLogger.warn("{}", new SlowLogParsedDocumentPrinter(doc, tookInNanos, reformat, maxSourceCharsToLog)); + indexLogger.warn("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); } else if (indexInfoThreshold >= 0 && tookInNanos > indexInfoThreshold) { - indexLogger.info("{}", new SlowLogParsedDocumentPrinter(doc, tookInNanos, reformat, maxSourceCharsToLog)); + indexLogger.info("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); } else if (indexDebugThreshold >= 0 && tookInNanos > indexDebugThreshold) { - indexLogger.debug("{}", new SlowLogParsedDocumentPrinter(doc, tookInNanos, reformat, maxSourceCharsToLog)); + indexLogger.debug("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); } else if (indexTraceThreshold >= 0 && 
tookInNanos > indexTraceThreshold) { - indexLogger.trace("{}", new SlowLogParsedDocumentPrinter(doc, tookInNanos, reformat, maxSourceCharsToLog)); + indexLogger.trace("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); } } @@ -156,9 +147,11 @@ public final class IndexingSlowLog implements IndexingOperationListener { private final long tookInNanos; private final boolean reformat; private final int maxSourceCharsToLog; + private final Index index; - SlowLogParsedDocumentPrinter(ParsedDocument doc, long tookInNanos, boolean reformat, int maxSourceCharsToLog) { + SlowLogParsedDocumentPrinter(Index index, ParsedDocument doc, long tookInNanos, boolean reformat, int maxSourceCharsToLog) { this.doc = doc; + this.index = index; this.tookInNanos = tookInNanos; this.reformat = reformat; this.maxSourceCharsToLog = maxSourceCharsToLog; @@ -167,6 +160,7 @@ public final class IndexingSlowLog implements IndexingOperationListener { @Override public String toString() { StringBuilder sb = new StringBuilder(); + sb.append(index).append(" "); sb.append("took[").append(TimeValue.timeValueNanos(tookInNanos)).append("], took_millis[").append(TimeUnit.NANOSECONDS.toMillis(tookInNanos)).append("], "); sb.append("type[").append(doc.type()).append("], "); sb.append("id[").append(doc.id()).append("], "); diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java index ffa23bf56e4..172e16d8f35 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java @@ -20,10 +20,14 @@ package org.elasticsearch.index.fielddata; import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.ReaderUtil; import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FieldComparatorSource; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.SortField; import org.apache.lucene.search.Weight; @@ -122,11 +126,11 @@ public interface IndexFieldData extends IndexCompone public static class Nested { private final BitSetProducer rootFilter; - private final Weight innerFilter; + private final Query innerQuery; - public Nested(BitSetProducer rootFilter, Weight innerFilter) { + public Nested(BitSetProducer rootFilter, Query innerQuery) { this.rootFilter = rootFilter; - this.innerFilter = innerFilter; + this.innerQuery = innerQuery; } /** @@ -140,7 +144,10 @@ public interface IndexFieldData extends IndexCompone * Get a {@link DocIdSet} that matches the inner documents. */ public DocIdSetIterator innerDocs(LeafReaderContext ctx) throws IOException { - Scorer s = innerFilter.scorer(ctx); + final IndexReaderContext topLevelCtx = ReaderUtil.getTopLevelContext(ctx); + IndexSearcher indexSearcher = new IndexSearcher(topLevelCtx); + Weight weight = indexSearcher.createNormalizedWeight(innerQuery, false); + Scorer s = weight.scorer(ctx); return s == null ? 
null : s.iterator(); } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java index 1a1c1592d7e..f72533d30cf 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java @@ -28,6 +28,8 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -56,13 +58,13 @@ import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField; */ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements ArrayValueMapperParser { public static final String CONTENT_TYPE = "geo_point"; + protected static final DeprecationLogger deprecationLogger = new DeprecationLogger(Loggers.getLogger(BaseGeoPointFieldMapper.class)); public static class Names { public static final String LAT = "lat"; public static final String LAT_SUFFIX = "." + LAT; public static final String LON = "lon"; public static final String LON_SUFFIX = "." + LON; public static final String GEOHASH = "geohash"; - public static final String GEOHASH_SUFFIX = "." + GEOHASH; public static final String IGNORE_MALFORMED = "ignore_malformed"; } @@ -194,9 +196,13 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr String propName = Strings.toUnderscoreCase(entry.getKey()); Object propNode = entry.getValue(); if (propName.equals("lat_lon")) { + deprecationLogger.deprecated(CONTENT_TYPE + " lat_lon parameter is deprecated and will be removed " + + "in the next major release"); builder.enableLatLon(XContentMapValues.lenientNodeBooleanValue(propNode)); iterator.remove(); } else if (propName.equals("precision_step")) { + deprecationLogger.deprecated(CONTENT_TYPE + " precision_step parameter is deprecated and will be removed " + + "in the next major release"); builder.precisionStep(XContentMapValues.nodeIntegerValue(propNode)); iterator.remove(); } else if (propName.equals("geohash")) { diff --git a/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java index 05c2a74bb9f..2c906dc7cb1 100644 --- a/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java @@ -250,7 +250,8 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder use prefix encoded postings format diff --git a/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java index 784c924efcf..b11b57df175 100644 --- a/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java @@ -219,18 +219,18 @@ public class GeoDistanceQueryBuilder extends AbstractQueryBuilder similaritySettings = this.indexSettings.getSettings().getGroups(IndexModule.SIMILARITY_SETTINGS_PREFIX); for 
(Map.Entry entry : similaritySettings.entrySet()) { String name = entry.getKey(); + // Starting with v5.0 indices, it should no longer be possible to redefine built-in similarities + if(BUILT_IN.containsKey(name) && indexSettings.getIndexVersionCreated().onOrAfter(Version.V_5_0_0)) { + throw new IllegalArgumentException("Cannot redefine built-in Similarity [" + name + "]"); + } Settings settings = entry.getValue(); String typeName = settings.get("type"); if (typeName == null) { @@ -76,9 +81,16 @@ public final class SimilarityService extends AbstractIndexComponent { } providers.put(name, factory.apply(name, settings)); } - addSimilarities(similaritySettings, providers, DEFAULTS); + for (Map.Entry entry : addSimilarities(similaritySettings, DEFAULTS).entrySet()) { + // Avoid overwriting custom providers for indices older that v5.0 + if (providers.containsKey(entry.getKey()) && indexSettings.getIndexVersionCreated().before(Version.V_5_0_0)) { + continue; + } + providers.put(entry.getKey(), entry.getValue()); + } this.similarities = providers; - defaultSimilarity = providers.get(SimilarityService.DEFAULT_SIMILARITY).get(); + defaultSimilarity = (providers.get("default") != null) ? providers.get("default").get() + : providers.get(SimilarityService.DEFAULT_SIMILARITY).get(); // Expert users can configure the base type as being different to default, but out-of-box we use default. baseSimilarity = (providers.get("base") != null) ? providers.get("base").get() : defaultSimilarity; @@ -90,7 +102,9 @@ public final class SimilarityService extends AbstractIndexComponent { defaultSimilarity; } - private void addSimilarities(Map similaritySettings, Map providers, Map> similarities) { + private Map addSimilarities(Map similaritySettings, + Map> similarities) { + Map providers = new HashMap<>(similarities.size()); for (Map.Entry> entry : similarities.entrySet()) { String name = entry.getKey(); BiFunction factory = entry.getValue(); @@ -100,12 +114,17 @@ public final class SimilarityService extends AbstractIndexComponent { } providers.put(name, factory.apply(name, settings)); } + return providers; } public SimilarityProvider getSimilarity(String name) { return similarities.get(name); } + public SimilarityProvider getDefaultSimilarity() { + return similarities.get("default"); + } + static class PerFieldSimilarity extends PerFieldSimilarityWrapper { private final Similarity defaultSimilarity; diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesService.java b/core/src/main/java/org/elasticsearch/indices/IndicesService.java index 440a11a1904..6fd833471ed 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -103,6 +103,7 @@ import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Predicate; +import java.util.stream.Collectors; import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; @@ -185,14 +186,14 @@ public class IndicesService extends AbstractLifecycleComponent i ExecutorService indicesStopExecutor = Executors.newFixedThreadPool(5, EsExecutors.daemonThreadFactory("indices_shutdown")); // Copy indices because we modify it asynchronously in the body of the loop - Set indices = new HashSet<>(this.indices.keySet()); + final Set indices = this.indices.values().stream().map(s -> s.index()).collect(Collectors.toSet()); final CountDownLatch latch = new 
CountDownLatch(indices.size()); - for (final String index : indices) { + for (final Index index : indices) { indicesStopExecutor.execute(() -> { try { removeIndex(index, "shutdown", false); } catch (Throwable e) { - logger.warn("failed to remove index on stop [" + index + "]", e); + logger.warn("failed to remove index on stop " + index + "", e); } finally { latch.countDown(); } @@ -256,7 +257,7 @@ public class IndicesService extends AbstractLifecycleComponent i } Map> statsByShard = new HashMap<>(); - for (IndexService indexService : indices.values()) { + for (IndexService indexService : this) { for (IndexShard indexShard : indexService) { try { if (indexShard.routingEntry() == null) { @@ -290,17 +291,8 @@ public class IndicesService extends AbstractLifecycleComponent i return indices.values().iterator(); } - public boolean hasIndex(String index) { - return indices.containsKey(index); - } - - /** - * Returns an IndexService for the specified index if exists otherwise returns null. - * - */ - @Nullable - public IndexService indexService(String index) { - return indices.get(index); + public boolean hasIndex(Index index) { + return indices.containsKey(index.getUUID()); } /** @@ -309,33 +301,21 @@ public class IndicesService extends AbstractLifecycleComponent i */ @Nullable public IndexService indexService(Index index) { - return indexService(index.getName()); - } - - /** - * Returns an IndexService for the specified index if exists otherwise a {@link IndexNotFoundException} is thrown. - */ - public IndexService indexServiceSafe(String index) { - IndexService indexService = indexService(index); - if (indexService == null) { - throw new IndexNotFoundException(index); - } - return indexService; + return indices.get(index.getUUID()); } /** * Returns an IndexService for the specified index if exists otherwise a {@link IndexNotFoundException} is thrown. */ public IndexService indexServiceSafe(Index index) { - IndexService indexService = indexServiceSafe(index.getName()); - if (indexService.indexUUID().equals(index.getUUID()) == false) { + IndexService indexService = indices.get(index.getUUID()); + if (indexService == null) { throw new IndexNotFoundException(index); } + assert indexService.indexUUID().equals(index.getUUID()) : "uuid mismatch local: " + indexService.indexUUID() + " incoming: " + index.getUUID(); return indexService; } - - /** * Creates a new {@link IndexService} for the given metadata. 
* @param indexMetaData the index metadata to create the index for @@ -346,10 +326,13 @@ public class IndicesService extends AbstractLifecycleComponent i if (!lifecycle.started()) { throw new IllegalStateException("Can't create an index [" + indexMetaData.getIndex() + "], node is closed"); } + if (indexMetaData.getIndexUUID().equals(IndexMetaData.INDEX_UUID_NA_VALUE)) { + throw new IllegalArgumentException("index must have a real UUID found value: [" + indexMetaData.getIndexUUID() + "]"); + } final Index index = indexMetaData.getIndex(); final Predicate indexNameMatcher = (indexExpression) -> indexNameExpressionResolver.matchesIndex(index.getName(), indexExpression, clusterService.state()); final IndexSettings idxSettings = new IndexSettings(indexMetaData, this.settings, indexNameMatcher, indexScopeSetting); - if (indices.containsKey(index.getName())) { + if (hasIndex(index)) { throw new IndexAlreadyExistsException(index); } logger.debug("creating Index [{}], shards [{}]/[{}{}]", @@ -378,7 +361,7 @@ public class IndicesService extends AbstractLifecycleComponent i try { assert indexService.getIndexEventListener() == listener; listener.afterIndexCreated(indexService); - indices = newMapBuilder(indices).put(index.getName(), indexService).immutableMap(); + indices = newMapBuilder(indices).put(index.getUUID(), indexService).immutableMap(); success = true; return indexService; } finally { @@ -395,22 +378,24 @@ public class IndicesService extends AbstractLifecycleComponent i * @param index the index to remove * @param reason the high level reason causing this removal */ - public void removeIndex(String index, String reason) { + public void removeIndex(Index index, String reason) { removeIndex(index, reason, false); } - private void removeIndex(String index, String reason, boolean delete) { + private void removeIndex(Index index, String reason, boolean delete) { + final String indexName = index.getName(); try { final IndexService indexService; final IndexEventListener listener; synchronized (this) { - if (indices.containsKey(index) == false) { + if (hasIndex(index) == false) { return; } - logger.debug("[{}] closing ... (reason [{}])", index, reason); + logger.debug("[{}] closing ... (reason [{}])", indexName, reason); Map newIndices = new HashMap<>(indices); - indexService = newIndices.remove(index); + indexService = newIndices.remove(index.getUUID()); + assert indexService != null : "IndexService is null for index: " + index; indices = unmodifiableMap(newIndices); listener = indexService.getIndexEventListener(); } @@ -419,9 +404,9 @@ public class IndicesService extends AbstractLifecycleComponent i if (delete) { listener.beforeIndexDeleted(indexService); } - logger.debug("[{}] closing index service (reason [{}])", index, reason); + logger.debug("{} closing index service (reason [{}])", index, reason); indexService.close(reason, delete); - logger.debug("[{}] closed... (reason [{}])", index, reason); + logger.debug("{} closed... (reason [{}])", index, reason); listener.afterIndexClosed(indexService.index(), indexService.getIndexSettings().getSettings()); if (delete) { final IndexSettings indexSettings = indexService.getIndexSettings(); @@ -474,12 +459,12 @@ public class IndicesService extends AbstractLifecycleComponent i * Deletes the given index. Persistent parts of the index * like the shards files, state and transaction logs are removed once all resources are released. 
* - * Equivalent to {@link #removeIndex(String, String)} but fires + * Equivalent to {@link #removeIndex(Index, String)} but fires * different lifecycle events to ensure pending resources of this index are immediately removed. * @param index the index to delete * @param reason the high level reason causing this delete */ - public void deleteIndex(String index, String reason) throws IOException { + public void deleteIndex(Index index, String reason) throws IOException { removeIndex(index, reason, true); } @@ -505,16 +490,17 @@ public class IndicesService extends AbstractLifecycleComponent i public void deleteIndexStore(String reason, IndexMetaData metaData, ClusterState clusterState, boolean closed) throws IOException { if (nodeEnv.hasNodeFile()) { synchronized (this) { - String indexName = metaData.getIndex().getName(); - if (indices.containsKey(indexName)) { - String localUUid = indices.get(indexName).indexUUID(); - throw new IllegalStateException("Can't delete index store for [" + indexName + "] - it's still part of the indices service [" + localUUid + "] [" + metaData.getIndexUUID() + "]"); + Index index = metaData.getIndex(); + if (hasIndex(index)) { + String localUUid = indexService(index).indexUUID(); + throw new IllegalStateException("Can't delete index store for [" + index.getName() + "] - it's still part of the indices service [" + localUUid + "] [" + metaData.getIndexUUID() + "]"); } - if (clusterState.metaData().hasIndex(indexName) && (clusterState.nodes().localNode().masterNode() == true)) { + + if (clusterState.metaData().hasIndex(index.getName()) && (clusterState.nodes().localNode().masterNode() == true)) { // we do not delete the store if it is a master eligible node and the index is still in the cluster state // because we want to keep the meta data for indices around even if no shards are left here - final IndexMetaData index = clusterState.metaData().index(indexName); - throw new IllegalStateException("Can't delete closed index store for [" + indexName + "] - it's still part of the cluster state [" + index.getIndexUUID() + "] [" + metaData.getIndexUUID() + "]"); + final IndexMetaData idxMeta = clusterState.metaData().index(index.getName()); + throw new IllegalStateException("Can't delete closed index store for [" + index.getName() + "] - it's still part of the cluster state [" + idxMeta.getIndexUUID() + "] [" + metaData.getIndexUUID() + "]"); } } final IndexSettings indexSettings = buildIndexSettings(metaData); @@ -607,7 +593,7 @@ public class IndicesService extends AbstractLifecycleComponent i * @return true if the index can be deleted on this node */ public boolean canDeleteIndexContents(Index index, IndexSettings indexSettings, boolean closed) { - final IndexService indexService = this.indices.get(index.getName()); + final IndexService indexService = indexService(index); // Closed indices may be deleted, even if they are on a shared // filesystem. 
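The lookups in this file are now keyed by Index (internally by its uuid), and createIndex rejects metadata whose uuid is still IndexMetaData.INDEX_UUID_NA_VALUE. A rough sketch of the intended call pattern for consumers, illustrative only, assuming a ShardId and IndicesService are in scope:

    // Resolve services through the full Index (name + uuid) rather than the index name.
    Index index = shardId.getIndex();
    if (indicesService.hasIndex(index)) {
        IndexService indexService = indicesService.indexServiceSafe(index); // throws IndexNotFoundException if missing
        IndexShard shard = indexService.getShard(shardId.id());
        // ... use the shard
    }
    // Removal and deletion also take the Index now:
    // indicesService.removeIndex(index, "no longer assigned to this node");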
Since it is closed we aren't deleting it for relocation if (indexSettings.isOnSharedFilesystem() == false || closed) { @@ -634,7 +620,7 @@ public class IndicesService extends AbstractLifecycleComponent i */ public boolean canDeleteShardContent(ShardId shardId, IndexSettings indexSettings) { assert shardId.getIndex().equals(indexSettings.getIndex()); - final IndexService indexService = this.indices.get(shardId.getIndexName()); + final IndexService indexService = indexService(shardId.getIndex()); if (indexSettings.isOnSharedFilesystem() == false) { if (indexService != null && nodeEnv.hasNodeFile()) { return indexService.hasShard(shardId.id()) == false; diff --git a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index 7998afb7656..af667f356e8 100644 --- a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -46,6 +46,7 @@ import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.Callback; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexShardAlreadyExistsException; @@ -157,13 +158,13 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent { @Override public void handle(final IndexShard.ShardFailure shardFailure) { - final IndexService indexService = indicesService.indexService(shardFailure.routing.shardId().getIndex().getName()); + final IndexService indexService = indicesService.indexService(shardFailure.routing.shardId().getIndex()); final ShardRouting shardRouting = shardFailure.routing; threadPool.generic().execute(() -> { synchronized (mutex) { diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySource.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySource.java index 9a5c23fc2e1..934730c7c93 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySource.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySource.java @@ -83,7 +83,7 @@ public class RecoverySource extends AbstractComponent implements IndexEventListe } private RecoveryResponse recover(final StartRecoveryRequest request) throws IOException { - final IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex().getName()); + final IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); final IndexShard shard = indexService.getShard(request.shardId().id()); // starting recovery from that our (the source) shard state is marking the shard to be in recovery mode as well, otherwise diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index b92e2066af2..15b9b59dd28 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -137,7 +137,7 @@ public class RecoverySourceHandler { } } - logger.trace("snapshot translog for recovery. 
current size is [{}]", translogView.totalOperations()); + logger.trace("{} snapshot translog for recovery. current size is [{}]", shard.shardId(), translogView.totalOperations()); try { phase2(translogView.snapshot()); } catch (Throwable e) { diff --git a/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java b/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java index d0aec817ee9..6e9859efb2e 100644 --- a/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java +++ b/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java @@ -348,7 +348,7 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe return null; } ShardId shardId = request.shardId; - IndexService indexService = indicesService.indexService(shardId.getIndexName()); + IndexService indexService = indicesService.indexService(shardId.getIndex()); if (indexService != null && indexService.indexUUID().equals(request.indexUUID)) { return indexService.getShardOrNull(shardId.id()); } diff --git a/core/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java b/core/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java index bcc2d7f74c4..e009cbf04d1 100644 --- a/core/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java +++ b/core/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java @@ -126,7 +126,7 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesAction(channel)); } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/tasks/RestListTasksAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/tasks/RestListTasksAction.java index 992267fa8a5..9a9d1991298 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/tasks/RestListTasksAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/tasks/RestListTasksAction.java @@ -50,13 +50,15 @@ public class RestListTasksAction extends BaseRestHandler { TaskId taskId = new TaskId(request.param("taskId")); String[] actions = Strings.splitStringByCommaToArray(request.param("actions")); TaskId parentTaskId = new TaskId(request.param("parent_task_id")); + boolean waitForCompletion = request.paramAsBoolean("wait_for_completion", false); ListTasksRequest listTasksRequest = new ListTasksRequest(); - listTasksRequest.taskId(taskId); - listTasksRequest.nodesIds(nodesIds); - listTasksRequest.detailed(detailed); - listTasksRequest.actions(actions); - listTasksRequest.parentTaskId(parentTaskId); + listTasksRequest.setTaskId(taskId); + listTasksRequest.setNodesIds(nodesIds); + listTasksRequest.setDetailed(detailed); + listTasksRequest.setActions(actions); + listTasksRequest.setParentTaskId(parentTaskId); + listTasksRequest.setWaitForCompletion(waitForCompletion); client.admin().cluster().listTasks(listTasksRequest, new RestToXContentListener<>(channel)); } } diff --git a/core/src/main/java/org/elasticsearch/script/ScriptService.java b/core/src/main/java/org/elasticsearch/script/ScriptService.java index 8e1ac1c8d77..cfc402dbb04 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptService.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptService.java @@ -55,7 +55,6 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.env.Environment; import 
org.elasticsearch.index.query.TemplateQueryParser; -import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.watcher.FileChangesListener; import org.elasticsearch.watcher.FileWatcher; @@ -225,6 +224,8 @@ public class ScriptService extends AbstractComponent implements Closeable { return scriptEngineService; } + + /** * Checks if a script can be executed and compiles it if needed, or returns the previously compiled and cached script. */ @@ -516,46 +517,53 @@ public class ScriptService extends AbstractComponent implements Closeable { private class ScriptChangesListener extends FileChangesListener { - private Tuple scriptNameExt(Path file) { + private Tuple getScriptNameExt(Path file) { Path scriptPath = scriptsDirectory.relativize(file); int extIndex = scriptPath.toString().lastIndexOf('.'); - if (extIndex != -1) { - String ext = scriptPath.toString().substring(extIndex + 1); - String scriptName = scriptPath.toString().substring(0, extIndex).replace(scriptPath.getFileSystem().getSeparator(), "_"); - return new Tuple<>(scriptName, ext); - } else { + if (extIndex <= 0) { return null; } + + String ext = scriptPath.toString().substring(extIndex + 1); + if (ext.isEmpty()) { + return null; + } + + String scriptName = scriptPath.toString().substring(0, extIndex).replace(scriptPath.getFileSystem().getSeparator(), "_"); + return new Tuple<>(scriptName, ext); } @Override public void onFileInit(Path file) { + Tuple scriptNameExt = getScriptNameExt(file); + if (scriptNameExt == null) { + logger.debug("Skipped script with invalid extension : [{}]", file); + return; + } if (logger.isTraceEnabled()) { logger.trace("Loading script file : [{}]", file); } - Tuple scriptNameExt = scriptNameExt(file); - if (scriptNameExt != null) { - ScriptEngineService engineService = getScriptEngineServiceForFileExt(scriptNameExt.v2()); - if (engineService == null) { - logger.warn("no script engine found for [{}]", scriptNameExt.v2()); - } else { - try { - //we don't know yet what the script will be used for, but if all of the operations for this lang - // with file scripts are disabled, it makes no sense to even compile it and cache it. - if (isAnyScriptContextEnabled(engineService.getTypes().get(0), engineService, ScriptType.FILE)) { - logger.info("compiling script file [{}]", file.toAbsolutePath()); - try(InputStreamReader reader = new InputStreamReader(Files.newInputStream(file), StandardCharsets.UTF_8)) { - String script = Streams.copyToString(reader); - CacheKey cacheKey = new CacheKey(engineService, scriptNameExt.v1(), null, Collections.emptyMap()); - staticCache.put(cacheKey, new CompiledScript(ScriptType.FILE, scriptNameExt.v1(), engineService.getTypes().get(0), engineService.compile(script, Collections.emptyMap()))); - scriptMetrics.onCompilation(); - } - } else { - logger.warn("skipping compile of script file [{}] as all scripted operations are disabled for file scripts", file.toAbsolutePath()); + + ScriptEngineService engineService = getScriptEngineServiceForFileExt(scriptNameExt.v2()); + if (engineService == null) { + logger.warn("No script engine found for [{}]", scriptNameExt.v2()); + } else { + try { + //we don't know yet what the script will be used for, but if all of the operations for this lang + // with file scripts are disabled, it makes no sense to even compile it and cache it. 
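The renamed getScriptNameExt(Path) above also tightens the check: an extension index of zero or below, or an empty extension, now yields null, so hidden files and extension-less files are skipped with a debug message instead of being compiled. A standalone sketch of that splitting rule (plain Java, not the actual ScriptService code):

    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.util.Arrays;

    public class ScriptNameExtSketch {
        // Mirrors the rule in the patch: reject names with no extension, a leading dot only, or an empty extension.
        static String[] scriptNameExt(Path scriptsDirectory, Path file) {
            Path scriptPath = scriptsDirectory.relativize(file);
            int extIndex = scriptPath.toString().lastIndexOf('.');
            if (extIndex <= 0) {
                return null;                       // ".hidden" or "noextension" -> skipped
            }
            String ext = scriptPath.toString().substring(extIndex + 1);
            if (ext.isEmpty()) {
                return null;                       // trailing dot, e.g. "broken." -> skipped
            }
            String name = scriptPath.toString().substring(0, extIndex)
                    .replace(scriptPath.getFileSystem().getSeparator(), "_");
            return new String[] { name, ext };     // "group1/calc.groovy" -> ["group1_calc", "groovy"]
        }

        public static void main(String[] args) {
            Path dir = Paths.get("config", "scripts");
            System.out.println(Arrays.toString(scriptNameExt(dir, dir.resolve("calc_score.groovy"))));
            System.out.println(scriptNameExt(dir, dir.resolve(".hidden"))); // null
        }
    }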
+ if (isAnyScriptContextEnabled(engineService.getTypes().get(0), engineService, ScriptType.FILE)) { + logger.info("compiling script file [{}]", file.toAbsolutePath()); + try (InputStreamReader reader = new InputStreamReader(Files.newInputStream(file), StandardCharsets.UTF_8)) { + String script = Streams.copyToString(reader); + CacheKey cacheKey = new CacheKey(engineService, scriptNameExt.v1(), null, Collections.emptyMap()); + staticCache.put(cacheKey, new CompiledScript(ScriptType.FILE, scriptNameExt.v1(), engineService.getTypes().get(0), engineService.compile(script, Collections.emptyMap()))); + scriptMetrics.onCompilation(); } - } catch (Throwable e) { - logger.warn("failed to load/compile script [{}]", e, scriptNameExt.v1()); + } else { + logger.warn("skipping compile of script file [{}] as all scripted operations are disabled for file scripts", file.toAbsolutePath()); } + } catch (Throwable e) { + logger.warn("failed to load/compile script [{}]", e, scriptNameExt.v1()); } } } @@ -567,7 +575,7 @@ public class ScriptService extends AbstractComponent implements Closeable { @Override public void onFileDeleted(Path file) { - Tuple scriptNameExt = scriptNameExt(file); + Tuple scriptNameExt = getScriptNameExt(file); if (scriptNameExt != null) { ScriptEngineService engineService = getScriptEngineServiceForFileExt(scriptNameExt.v2()); assert engineService != null; diff --git a/core/src/main/java/org/elasticsearch/search/SearchException.java b/core/src/main/java/org/elasticsearch/search/SearchException.java index 0d181cc1dce..535f8acd446 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchException.java +++ b/core/src/main/java/org/elasticsearch/search/SearchException.java @@ -45,7 +45,7 @@ public class SearchException extends ElasticsearchException implements Elasticse public SearchException(StreamInput in) throws IOException { super(in); if (in.readBoolean()) { - shardTarget = SearchShardTarget.readSearchShardTarget(in); + shardTarget = new SearchShardTarget(in); } else { shardTarget = null; } @@ -54,7 +54,12 @@ public class SearchException extends ElasticsearchException implements Elasticse @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeOptionalStreamable(shardTarget); + if (shardTarget == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + shardTarget.writeTo(out); + } } public SearchShardTarget shard() { diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java index e5cd7653a15..463e6d3e557 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchService.java +++ b/core/src/main/java/org/elasticsearch/search/SearchService.java @@ -534,10 +534,9 @@ public class SearchService extends AbstractLifecycleComponent imp } final SearchContext createContext(ShardSearchRequest request, @Nullable Engine.Searcher searcher) { - IndexService indexService = indicesService.indexServiceSafe(request.index()); - IndexShard indexShard = indexService.getShard(request.shardId()); - - SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().id(), indexShard.shardId().getIndex(), request.shardId()); + IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); + IndexShard indexShard = indexService.getShard(request.shardId().getId()); + SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().id(), indexShard.shardId()); Engine.Searcher engineSearcher = searcher == 
null ? indexShard.acquireSearcher("search") : searcher; diff --git a/core/src/main/java/org/elasticsearch/search/SearchShardTarget.java b/core/src/main/java/org/elasticsearch/search/SearchShardTarget.java index d3958505d70..d675a93b691 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchShardTarget.java +++ b/core/src/main/java/org/elasticsearch/search/SearchShardTarget.java @@ -23,28 +23,38 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.text.Text; import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; import java.io.IOException; /** * The target that the search request was executed on. */ -public class SearchShardTarget implements Streamable, Comparable { +public class SearchShardTarget implements Writeable, Comparable { private Text nodeId; private Text index; - private int shardId; + private ShardId shardId; - private SearchShardTarget() { + public SearchShardTarget(StreamInput in) throws IOException { + if (in.readBoolean()) { + nodeId = in.readText(); + } + shardId = ShardId.readShardId(in); + index = new Text(shardId.getIndexName()); + } + public SearchShardTarget(String nodeId, ShardId shardId) { + this.nodeId = nodeId == null ? null : new Text(nodeId); + this.index = new Text(shardId.getIndexName()); + this.shardId = shardId; } public SearchShardTarget(String nodeId, Index index, int shardId) { - this.nodeId = nodeId == null ? null : new Text(nodeId); - this.index = new Text(index.getName()); - this.shardId = shardId; + this(nodeId, new ShardId(index, shardId)); } @Nullable @@ -73,36 +83,26 @@ public class SearchShardTarget implements Streamable, Comparable o1, AtomicArray.Entry o2) { int i = o1.value.shardTarget().index().compareTo(o2.value.shardTarget().index()); if (i == 0) { - i = o1.value.shardTarget().shardId() - o2.value.shardTarget().shardId(); + i = o1.value.shardTarget().shardId().id() - o2.value.shardTarget().shardId().id(); } return i; } diff --git a/core/src/main/java/org/elasticsearch/search/fetch/ScrollQueryFetchSearchResult.java b/core/src/main/java/org/elasticsearch/search/fetch/ScrollQueryFetchSearchResult.java index fb0fc75299f..dbaee5b64bb 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/ScrollQueryFetchSearchResult.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/ScrollQueryFetchSearchResult.java @@ -26,7 +26,6 @@ import org.elasticsearch.transport.TransportResponse; import java.io.IOException; -import static org.elasticsearch.search.SearchShardTarget.readSearchShardTarget; import static org.elasticsearch.search.fetch.QueryFetchSearchResult.readQueryFetchSearchResult; /** @@ -56,7 +55,7 @@ public class ScrollQueryFetchSearchResult extends TransportResponse { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - shardTarget = readSearchShardTarget(in); + shardTarget = new SearchShardTarget(in); result = readQueryFetchSearchResult(in); result.shardTarget(shardTarget); } diff --git a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java index c6afe325bb3..dcbcce503a4 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java +++ 
b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java @@ -55,7 +55,6 @@ import static java.util.Collections.singletonMap; import static java.util.Collections.unmodifiableMap; import static org.elasticsearch.common.lucene.Lucene.readExplanation; import static org.elasticsearch.common.lucene.Lucene.writeExplanation; -import static org.elasticsearch.search.SearchShardTarget.readSearchShardTarget; import static org.elasticsearch.search.highlight.HighlightField.readHighlightField; import static org.elasticsearch.search.internal.InternalSearchHitField.readSearchHitField; @@ -638,7 +637,7 @@ public class InternalSearchHit implements SearchHit { if (context.streamShardTarget() == ShardTargetType.STREAM) { if (in.readBoolean()) { - shard = readSearchShardTarget(in); + shard = new SearchShardTarget(in); } } else if (context.streamShardTarget() == ShardTargetType.LOOKUP) { int lookupId = in.readVInt(); diff --git a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHits.java b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHits.java index 9e787cf2aa9..09d11e1a1a3 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHits.java +++ b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHits.java @@ -34,7 +34,6 @@ import java.util.IdentityHashMap; import java.util.Iterator; import java.util.Map; -import static org.elasticsearch.search.SearchShardTarget.readSearchShardTarget; import static org.elasticsearch.search.internal.InternalSearchHit.readSearchHit; /** @@ -216,7 +215,7 @@ public class InternalSearchHits implements SearchHits { // read the lookup table first int lookupSize = in.readVInt(); for (int i = 0; i < lookupSize; i++) { - context.handleShardLookup().put(in.readVInt(), readSearchShardTarget(in)); + context.handleShardLookup().put(in.readVInt(), new SearchShardTarget(in)); } } @@ -262,4 +261,4 @@ public class InternalSearchHits implements SearchHits { } } } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java index 0f46461f4a2..56ad8ed9467 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java @@ -58,8 +58,7 @@ import static org.elasticsearch.search.Scroll.readScroll; public class ShardSearchLocalRequest implements ShardSearchRequest { - private String index; - private int shardId; + private ShardId shardId; private int numberOfShards; private SearchType searchType; private Scroll scroll; @@ -97,8 +96,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { public ShardSearchLocalRequest(ShardId shardId, int numberOfShards, SearchType searchType, SearchSourceBuilder source, String[] types, Boolean requestCache) { - this.index = shardId.getIndexName(); - this.shardId = shardId.id(); + this.shardId = shardId; this.numberOfShards = numberOfShards; this.searchType = searchType; this.source = source; @@ -106,13 +104,9 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { this.requestCache = requestCache; } - @Override - public String index() { - return index; - } @Override - public int shardId() { + public ShardId shardId() { return shardId; } @@ -177,8 +171,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { @SuppressWarnings("unchecked") protected void 
innerReadFrom(StreamInput in) throws IOException { - index = in.readString(); - shardId = in.readVInt(); + shardId = ShardId.readShardId(in); searchType = SearchType.fromId(in.readByte()); numberOfShards = in.readVInt(); if (in.readBoolean()) { @@ -195,8 +188,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { } protected void innerWriteTo(StreamOutput out, boolean asKey) throws IOException { - out.writeString(index); - out.writeVInt(shardId); + shardId.writeTo(out); out.writeByte(searchType.id()); if (!asKey) { out.writeVInt(numberOfShards); diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java index 1f0b3d1f188..82ff69078aa 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java @@ -21,6 +21,7 @@ package org.elasticsearch.search.internal; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.script.Template; import org.elasticsearch.search.Scroll; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -34,9 +35,7 @@ import java.io.IOException; */ public interface ShardSearchRequest { - String index(); - - int shardId(); + ShardId shardId(); String[] types(); diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java index 48ea31c170a..dc19f84c7a7 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.script.Template; import org.elasticsearch.search.Scroll; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -71,13 +72,9 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha return originalIndices.indicesOptions(); } - @Override - public String index() { - return shardSearchLocalRequest.index(); - } @Override - public int shardId() { + public ShardId shardId() { return shardSearchLocalRequest.shardId(); } diff --git a/core/src/main/java/org/elasticsearch/search/query/ScrollQuerySearchResult.java b/core/src/main/java/org/elasticsearch/search/query/ScrollQuerySearchResult.java index ebb7615da44..bcdd94adf89 100644 --- a/core/src/main/java/org/elasticsearch/search/query/ScrollQuerySearchResult.java +++ b/core/src/main/java/org/elasticsearch/search/query/ScrollQuerySearchResult.java @@ -26,7 +26,6 @@ import org.elasticsearch.transport.TransportResponse; import java.io.IOException; -import static org.elasticsearch.search.SearchShardTarget.readSearchShardTarget; import static org.elasticsearch.search.query.QuerySearchResult.readQuerySearchResult; /** @@ -56,7 +55,7 @@ public class ScrollQuerySearchResult extends TransportResponse { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - shardTarget = readSearchShardTarget(in); + shardTarget = new SearchShardTarget(in); 
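For the SearchShardTarget changes in this area: the target is now constructed from (and exposes) a ShardId, and deserialization goes through the new SearchShardTarget(StreamInput) constructor rather than the old static readSearchShardTarget helper. A minimal, illustrative construction with a hypothetical node id and uuid:

    // The index text is derived from the ShardId, so callers only pass node id + shard id.
    ShardId shardId = new ShardId(new Index("twitter", "xyzAbCdUUID"), 2);
    SearchShardTarget target = new SearchShardTarget("node_1", shardId);
    assert target.shardId().id() == 2;
    // The older (nodeId, Index, int) constructor remains and simply delegates to this one.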
queryResult = readQuerySearchResult(in); queryResult.shardTarget(shardTarget); } diff --git a/core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java index c415fd5a70b..e805e21eff5 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java @@ -27,16 +27,12 @@ import java.io.IOException; /** * A sort builder to sort based on a document field. */ -public class FieldSortBuilder extends SortBuilder { +public class FieldSortBuilder extends SortBuilder { private final String fieldName; - private SortOrder order; - private Object missing; - private Boolean ignoreUnmapped; - private String unmappedType; private String sortMode; @@ -57,36 +53,15 @@ public class FieldSortBuilder extends SortBuilder { this.fieldName = fieldName; } - /** - * The order of sorting. Defaults to {@link SortOrder#ASC}. - */ - @Override - public FieldSortBuilder order(SortOrder order) { - this.order = order; - return this; - } - /** * Sets the value when a field is missing in a doc. Can also be set to _last or * _first to sort missing last or first respectively. */ - @Override public FieldSortBuilder missing(Object missing) { this.missing = missing; return this; } - /** - * Sets if the field does not exists in the index, it should be ignored and not sorted by or not. Defaults - * to false (not ignoring). - * @deprecated Use {@link #unmappedType(String)} instead. - */ - @Deprecated - public FieldSortBuilder ignoreUnmapped(boolean ignoreUnmapped) { - this.ignoreUnmapped = ignoreUnmapped; - return this; - } - /** * Set the type to use in case the current field is not mapped in an index. * Specifying a type tells Elasticsearch what type the sort values should have, which is important @@ -132,15 +107,10 @@ public class FieldSortBuilder extends SortBuilder { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(fieldName); - if (order != null) { - builder.field("order", order.toString()); - } + builder.field(ORDER_FIELD.getPreferredName(), order); if (missing != null) { builder.field("missing", missing); } - if (ignoreUnmapped != null) { - builder.field(SortParseElement.IGNORE_UNMAPPED.getPreferredName(), ignoreUnmapped); - } if (unmappedType != null) { builder.field(SortParseElement.UNMAPPED_TYPE.getPreferredName(), unmappedType); } diff --git a/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java index e37eed61c6d..b5a10e238b7 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java @@ -44,7 +44,7 @@ import java.util.Objects; /** * A geo distance based sorting on a geo point like field. 
*/ -public class GeoDistanceSortBuilder extends SortBuilder +public class GeoDistanceSortBuilder extends SortBuilder implements ToXContent, NamedWriteable, SortElementParserTemp { public static final String NAME = "_geo_distance"; public static final boolean DEFAULT_COERCE = false; @@ -57,14 +57,13 @@ public class GeoDistanceSortBuilder extends SortBuilder private GeoDistance geoDistance = GeoDistance.DEFAULT; private DistanceUnit unit = DistanceUnit.DEFAULT; - private SortOrder order = SortOrder.ASC; - + // TODO there is an enum that covers that parameter which we should be using here private String sortMode = null; @SuppressWarnings("rawtypes") private QueryBuilder nestedFilter; private String nestedPath; - + // TODO switch to GeoValidationMethod enum private boolean coerce = DEFAULT_COERCE; private boolean ignoreMalformed = DEFAULT_IGNORE_MALFORMED; @@ -109,7 +108,7 @@ public class GeoDistanceSortBuilder extends SortBuilder } this.fieldName = fieldName; } - + /** * Copy constructor. * */ @@ -125,7 +124,7 @@ public class GeoDistanceSortBuilder extends SortBuilder this.coerce = original.coerce; this.ignoreMalformed = original.ignoreMalformed; } - + /** * Returns the geo point like field the distance based sort operates on. * */ @@ -153,7 +152,7 @@ public class GeoDistanceSortBuilder extends SortBuilder this.points.addAll(Arrays.asList(points)); return this; } - + /** * Returns the points to create the range distance facets from. */ @@ -163,7 +162,7 @@ public class GeoDistanceSortBuilder extends SortBuilder /** * The geohash of the geo point to create the range distance facets from. - * + * * Deprecated - please use points(GeoPoint... points) instead. */ @Deprecated @@ -173,7 +172,7 @@ public class GeoDistanceSortBuilder extends SortBuilder } return this; } - + /** * The geo distance type used to compute the distance. */ @@ -181,7 +180,7 @@ public class GeoDistanceSortBuilder extends SortBuilder this.geoDistance = geoDistance; return this; } - + /** * Returns the geo distance type used to compute the distance. */ @@ -204,30 +203,6 @@ public class GeoDistanceSortBuilder extends SortBuilder return this.unit; } - /** - * The order of sorting. Defaults to {@link SortOrder#ASC}. - */ - @Override - public GeoDistanceSortBuilder order(SortOrder order) { - this.order = order; - return this; - } - - /** Returns the order of sorting. */ - public SortOrder order() { - return this.order; - } - - /** - * Not relevant. - * - * TODO should this throw an exception rather than silently ignore a parameter that is not used? - */ - @Override - public GeoDistanceSortBuilder missing(Object missing) { - return this; - } - /** * Defines which distance to use for sorting in the case a document contains multiple geo points. * Possible values: min and max @@ -250,16 +225,16 @@ public class GeoDistanceSortBuilder extends SortBuilder * Sets the nested filter that the nested objects should match with in order to be taken into account * for sorting. */ - public GeoDistanceSortBuilder setNestedFilter(QueryBuilder nestedFilter) { + public GeoDistanceSortBuilder setNestedFilter(QueryBuilder nestedFilter) { this.nestedFilter = nestedFilter; return this; } - /** + /** * Returns the nested filter that the nested objects should match with in order to be taken into account - * for sorting. + * for sorting. 
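As a usage sketch for the builder in its new shape (order handling now comes from the shared SortBuilder base and the no-op missing(...) override is gone); the field name and coordinates are made up and the fluent chaining is assumed from the setters shown in this hunk:

    GeoDistanceSortBuilder sort = new GeoDistanceSortBuilder("pin.location", new GeoPoint(40.715, -74.011))
            .unit(DistanceUnit.KILOMETERS)
            .geoDistance(GeoDistance.PLANE)
            .sortMode("min");
    sort.order(SortOrder.ASC);   // inherited setter; ASC remains the parsing default for _geo_distance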
**/ - public QueryBuilder getNestedFilter() { + public QueryBuilder getNestedFilter() { return this.nestedFilter; } @@ -271,7 +246,7 @@ public class GeoDistanceSortBuilder extends SortBuilder this.nestedPath = nestedPath; return this; } - + /** * Returns the nested path if sorting occurs on a field that is inside a nested object. By default when sorting on a * field inside a nested object, the nearest upper nested object is selected as nested path. @@ -295,7 +270,7 @@ public class GeoDistanceSortBuilder extends SortBuilder } return this; } - + public boolean ignoreMalformed() { return this.ignoreMalformed; } @@ -312,11 +287,7 @@ public class GeoDistanceSortBuilder extends SortBuilder builder.field("unit", unit); builder.field("distance_type", geoDistance.name().toLowerCase(Locale.ROOT)); - if (order == SortOrder.DESC) { - builder.field("reverse", true); - } else { - builder.field("reverse", false); - } + builder.field(ORDER_FIELD.getPreferredName(), order); if (sortMode != null) { builder.field("mode", sortMode); @@ -373,7 +344,7 @@ public class GeoDistanceSortBuilder extends SortBuilder public void writeTo(StreamOutput out) throws IOException { out.writeString(fieldName); out.writeGenericValue(points); - + geoDistance.writeTo(out); unit.writeTo(out); order.writeTo(out); @@ -392,10 +363,10 @@ public class GeoDistanceSortBuilder extends SortBuilder @Override public GeoDistanceSortBuilder readFrom(StreamInput in) throws IOException { String fieldName = in.readString(); - - ArrayList points = (ArrayList) in.readGenericValue(); + + ArrayList points = (ArrayList) in.readGenericValue(); GeoDistanceSortBuilder result = new GeoDistanceSortBuilder(fieldName, points.toArray(new GeoPoint[points.size()])); - + result.geoDistance(GeoDistance.readGeoDistanceFrom(in)); result.unit(DistanceUnit.readDistanceUnit(in)); result.order(SortOrder.readOrderFrom(in)); @@ -419,9 +390,9 @@ public class GeoDistanceSortBuilder extends SortBuilder List geoPoints = new ArrayList<>(); DistanceUnit unit = DistanceUnit.DEFAULT; GeoDistance geoDistance = GeoDistance.DEFAULT; - boolean reverse = false; + SortOrder order = SortOrder.ASC; MultiValueMode sortMode = null; - QueryBuilder nestedFilter = null; + QueryBuilder nestedFilter = null; String nestedPath = null; boolean coerce = GeoDistanceSortBuilder.DEFAULT_COERCE; @@ -439,8 +410,8 @@ public class GeoDistanceSortBuilder extends SortBuilder } else if (token == XContentParser.Token.START_OBJECT) { // the json in the format of -> field : { lat : 30, lon : 12 } if ("nested_filter".equals(currentName) || "nestedFilter".equals(currentName)) { - // TODO Note to remember: while this is kept as a QueryBuilder internally, - // we need to make sure to call toFilter() on it once on the shard + // TODO Note to remember: while this is kept as a QueryBuilder internally, + // we need to make sure to call toFilter() on it once on the shard // (e.g. in the new build() method) nestedFilter = context.parseInnerQueryBuilder(); } else { @@ -451,9 +422,9 @@ public class GeoDistanceSortBuilder extends SortBuilder } } else if (token.isValue()) { if ("reverse".equals(currentName)) { - reverse = parser.booleanValue(); + order = parser.booleanValue() ? 
SortOrder.DESC : SortOrder.ASC; } else if ("order".equals(currentName)) { - reverse = "desc".equals(parser.text()); + order = SortOrder.fromString(parser.text()); } else if ("unit".equals(currentName)) { unit = DistanceUnit.fromString(parser.text()); } else if ("distance_type".equals(currentName) || "distanceType".equals(currentName)) { @@ -484,11 +455,7 @@ public class GeoDistanceSortBuilder extends SortBuilder GeoDistanceSortBuilder result = new GeoDistanceSortBuilder(fieldName, geoPoints.toArray(new GeoPoint[geoPoints.size()])); result.geoDistance(geoDistance); result.unit(unit); - if (reverse) { - result.order(SortOrder.DESC); - } else { - result.order(SortOrder.ASC); - } + result.order(order); if (sortMode != null) { result.sortMode(sortMode.name()); } diff --git a/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java b/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java index 27c8b8e0ed5..b9407b31bf6 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java +++ b/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java @@ -43,9 +43,9 @@ import org.elasticsearch.index.fielddata.MultiGeoPointValues; import org.elasticsearch.index.fielddata.NumericDoubleValues; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.support.NestedInnerQueryParseSupport; import org.elasticsearch.search.MultiValueMode; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.ArrayList; @@ -62,7 +62,7 @@ public class GeoDistanceSortParser implements SortParser { } @Override - public SortField parse(XContentParser parser, SearchContext context) throws Exception { + public SortField parse(XContentParser parser, QueryShardContext context) throws Exception { String fieldName = null; List geoPoints = new ArrayList<>(); DistanceUnit unit = DistanceUnit.DEFAULT; @@ -71,7 +71,7 @@ public class GeoDistanceSortParser implements SortParser { MultiValueMode sortMode = null; NestedInnerQueryParseSupport nestedHelper = null; - final boolean indexCreatedBeforeV2_0 = context.indexShard().indexSettings().getIndexVersionCreated().before(Version.V_2_0_0); + final boolean indexCreatedBeforeV2_0 = context.indexVersionCreated().before(Version.V_2_0_0); boolean coerce = GeoDistanceSortBuilder.DEFAULT_COERCE; boolean ignoreMalformed = GeoDistanceSortBuilder.DEFAULT_IGNORE_MALFORMED; @@ -155,12 +155,12 @@ public class GeoDistanceSortParser implements SortParser { throw new IllegalArgumentException("sort_mode [sum] isn't supported for sorting by geo distance"); } - MappedFieldType fieldType = context.smartNameFieldType(fieldName); + MappedFieldType fieldType = context.fieldMapper(fieldName); if (fieldType == null) { throw new IllegalArgumentException("failed to find mapper for [" + fieldName + "] for geo distance based sort"); } final MultiValueMode finalSortMode = sortMode; // final reference for use in the anonymous class - final IndexGeoPointFieldData geoIndexFieldData = context.fieldData().getForField(fieldType); + final IndexGeoPointFieldData geoIndexFieldData = context.getForField(fieldType); final FixedSourceDistance[] distances = new FixedSourceDistance[geoPoints.size()]; for (int i = 0; i< geoPoints.size(); i++) { distances[i] = geoDistance.fixedSourceDistance(geoPoints.get(i).lat(), geoPoints.get(i).lon(), unit); @@ -168,15 
+168,16 @@ public class GeoDistanceSortParser implements SortParser { final Nested nested; if (nestedHelper != null && nestedHelper.getPath() != null) { - BitSetProducer rootDocumentsFilter = context.bitsetFilterCache().getBitSetProducer(Queries.newNonNestedFilter()); - Query innerDocumentsFilter; + BitSetProducer rootDocumentsFilter = context.bitsetFilter(Queries.newNonNestedFilter()); + Query innerDocumentsQuery; if (nestedHelper.filterFound()) { // TODO: use queries instead - innerDocumentsFilter = nestedHelper.getInnerFilter(); + innerDocumentsQuery = nestedHelper.getInnerFilter(); } else { - innerDocumentsFilter = nestedHelper.getNestedObjectMapper().nestedTypeFilter(); + innerDocumentsQuery = nestedHelper.getNestedObjectMapper().nestedTypeFilter(); } - nested = new Nested(rootDocumentsFilter, context.searcher().createNormalizedWeight(innerDocumentsFilter, false)); + + nested = new Nested(rootDocumentsFilter, innerDocumentsQuery); } else { nested = null; } diff --git a/core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java index 7435ff95f45..6b1bc054ee7 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java @@ -19,40 +19,103 @@ package org.elasticsearch.search.sort; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryParseContext; import java.io.IOException; +import java.util.Objects; /** * A sort builder allowing to sort by score. - * - * */ -public class ScoreSortBuilder extends SortBuilder { +public class ScoreSortBuilder extends SortBuilder<ScoreSortBuilder> implements NamedWriteable<ScoreSortBuilder>, + SortElementParserTemp<ScoreSortBuilder> { - private SortOrder order; + private static final String NAME = "_score"; + static final ScoreSortBuilder PROTOTYPE = new ScoreSortBuilder(); + public static final ParseField REVERSE_FIELD = new ParseField("reverse"); + public static final ParseField ORDER_FIELD = new ParseField("order"); - /** - * The order of sort scoring. By default, its {@link SortOrder#DESC}.
- */ - @Override - public ScoreSortBuilder order(SortOrder order) { - this.order = order; - return this; + public ScoreSortBuilder() { + // order defaults to desc when sorting on the _score + order(SortOrder.DESC); } - @Override - public SortBuilder missing(Object missing) { - return this; - } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject("_score"); - if (order == SortOrder.ASC) { - builder.field("reverse", true); - } + builder.startObject(NAME); + builder.field(ORDER_FIELD.getPreferredName(), order); builder.endObject(); return builder; } + + @Override + public ScoreSortBuilder fromXContent(QueryParseContext context, String elementName) throws IOException { + XContentParser parser = context.parser(); + ParseFieldMatcher matcher = context.parseFieldMatcher(); + + XContentParser.Token token; + String currentName = parser.currentName(); + ScoreSortBuilder result = new ScoreSortBuilder(); + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentName = parser.currentName(); + } else if (token.isValue()) { + if (matcher.match(currentName, REVERSE_FIELD)) { + if (parser.booleanValue()) { + result.order(SortOrder.ASC); + } + // else we keep the default DESC + } else if (matcher.match(currentName, ORDER_FIELD)) { + result.order(SortOrder.fromString(parser.text())); + } else { + throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] failed to parse field [" + currentName + "]"); + } + } else { + throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] unexpected token [" + token + "]"); + } + } + return result; + } + + @Override + public boolean equals(Object object) { + if (this == object) { + return true; + } + if (object == null || getClass() != object.getClass()) { + return false; + } + ScoreSortBuilder other = (ScoreSortBuilder) object; + return Objects.equals(order, other.order); + } + + @Override + public int hashCode() { + return Objects.hash(this.order); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + order.writeTo(out); + } + + @Override + public ScoreSortBuilder readFrom(StreamInput in) throws IOException { + ScoreSortBuilder builder = new ScoreSortBuilder().order(SortOrder.readOrderFrom(in)); + return builder; + } + + @Override + public String getWriteableName() { + return NAME; + } } diff --git a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java index e9a9c8df57c..e554eb8846b 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java @@ -28,14 +28,12 @@ import java.io.IOException; /** * Script sort builder allows to sort based on a custom script expression. */ -public class ScriptSortBuilder extends SortBuilder { +public class ScriptSortBuilder extends SortBuilder { private Script script; private final String type; - private SortOrder order; - private String sortMode; private QueryBuilder nestedFilter; @@ -53,23 +51,6 @@ public class ScriptSortBuilder extends SortBuilder { this.type = type; } - /** - * Sets the sort order. - */ - @Override - public ScriptSortBuilder order(SortOrder order) { - this.order = order; - return this; - } - - /** - * Not really relevant. 
- */ - @Override - public SortBuilder missing(Object missing) { - return this; - } - /** * Defines which distance to use for sorting in the case a document contains multiple geo points. * Possible values: min and max @@ -83,7 +64,7 @@ public class ScriptSortBuilder extends SortBuilder { * Sets the nested filter that the nested objects should match with in order to be taken into account * for sorting. */ - public ScriptSortBuilder setNestedFilter(QueryBuilder nestedFilter) { + public ScriptSortBuilder setNestedFilter(QueryBuilder nestedFilter) { this.nestedFilter = nestedFilter; return this; } @@ -102,9 +83,7 @@ public class ScriptSortBuilder extends SortBuilder { builder.startObject("_script"); builder.field("script", script); builder.field("type", type); - if (order == SortOrder.DESC) { - builder.field("reverse", true); - } + builder.field(ORDER_FIELD.getPreferredName(), order); if (sortMode != null) { builder.field("mode", sortMode); } diff --git a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java index e4fe2c08f75..c30ea503d80 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java +++ b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java @@ -27,6 +27,7 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.join.BitSetProducer; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.fielddata.FieldData; @@ -37,6 +38,7 @@ import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource; import org.elasticsearch.index.fielddata.fieldcomparator.DoubleValuesComparatorSource; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.support.NestedInnerQueryParseSupport; import org.elasticsearch.script.LeafSearchScript; import org.elasticsearch.script.Script; @@ -68,7 +70,7 @@ public class ScriptSortParser implements SortParser { } @Override - public SortField parse(XContentParser parser, SearchContext context) throws Exception { + public SortField parse(XContentParser parser, QueryShardContext context) throws Exception { ScriptParameterParser scriptParameterParser = new ScriptParameterParser(); Script script = null; String type = null; @@ -122,19 +124,20 @@ public class ScriptSortParser implements SortParser { script = new Script(scriptValue.script(), scriptValue.scriptType(), scriptParameterParser.lang(), params); } } else if (params != null) { - throw new SearchParseException(context, "script params must be specified inside script object", parser.getTokenLocation()); + throw new ParsingException(parser.getTokenLocation(), "script params must be specified inside script object"); } if (script == null) { - throw new SearchParseException(context, "_script sorting requires setting the script to sort by", parser.getTokenLocation()); + throw new ParsingException(parser.getTokenLocation(), "_script sorting requires setting the script to sort by"); } if (type == null) { - throw new SearchParseException(context, "_script sorting requires setting the type of the script", parser.getTokenLocation()); + throw new 
ParsingException(parser.getTokenLocation(), "_script sorting requires setting the type of the script"); } - final SearchScript searchScript = context.scriptService().search(context.lookup(), script, ScriptContext.Standard.SEARCH, Collections.emptyMap()); + final SearchScript searchScript = context.getScriptService().search( + context.lookup(), script, ScriptContext.Standard.SEARCH, Collections.emptyMap()); if (STRING_SORT_TYPE.equals(type) && (sortMode == MultiValueMode.SUM || sortMode == MultiValueMode.AVG)) { - throw new SearchParseException(context, "type [string] doesn't support mode [" + sortMode + "]", parser.getTokenLocation()); + throw new ParsingException(parser.getTokenLocation(), "type [string] doesn't support mode [" + sortMode + "]"); } if (sortMode == null) { @@ -144,7 +147,7 @@ public class ScriptSortParser implements SortParser { // If nested_path is specified, then wrap the `fieldComparatorSource` in a `NestedFieldComparatorSource` final Nested nested; if (nestedHelper != null && nestedHelper.getPath() != null) { - BitSetProducer rootDocumentsFilter = context.bitsetFilterCache().getBitSetProducer(Queries.newNonNestedFilter()); + BitSetProducer rootDocumentsFilter = context.bitsetFilter(Queries.newNonNestedFilter()); Query innerDocumentsFilter; if (nestedHelper.filterFound()) { // TODO: use queries instead @@ -152,7 +155,7 @@ public class ScriptSortParser implements SortParser { } else { innerDocumentsFilter = nestedHelper.getNestedObjectMapper().nestedTypeFilter(); } - nested = new Nested(rootDocumentsFilter, context.searcher().createNormalizedWeight(innerDocumentsFilter, false)); + nested = new Nested(rootDocumentsFilter, innerDocumentsFilter); } else { nested = null; } @@ -205,7 +208,7 @@ public class ScriptSortParser implements SortParser { }; break; default: - throw new SearchParseException(context, "custom script sort type [" + type + "] not supported", parser.getTokenLocation()); + throw new ParsingException(parser.getTokenLocation(), "custom script sort type [" + type + "] not supported"); } return new SortField("_script", fieldComparatorSource, reverse); diff --git a/core/src/main/java/org/elasticsearch/search/sort/SortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/SortBuilder.java index da80506dde2..7852af4e97e 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/SortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/SortBuilder.java @@ -20,14 +20,20 @@ package org.elasticsearch.search.sort; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import java.util.Objects; /** * */ -public abstract class SortBuilder implements ToXContent { +public abstract class SortBuilder<T extends SortBuilder<T>> implements ToXContent { + + protected SortOrder order = SortOrder.ASC; + public static final ParseField ORDER_FIELD = new ParseField("order"); @Override public String toString() { @@ -42,13 +48,19 @@ public abstract class SortBuilder implements ToXContent { } /** - * The order of sorting. Defaults to {@link SortOrder#ASC}. + * Set the order of sorting. */ - public abstract SortBuilder order(SortOrder order); + @SuppressWarnings("unchecked") + public T order(SortOrder order) { + Objects.requireNonNull(order, "sort order cannot be null."); + this.order = order; + return (T) this; + } /** - * Sets the value when a field is missing in a doc.
Can also be set to _last or - * _first to sort missing last or first respectively. + * Return the {@link SortOrder} used for this {@link SortBuilder}. */ - public abstract SortBuilder missing(Object missing); + public SortOrder order() { + return this.order; + } } diff --git a/core/src/main/java/org/elasticsearch/search/sort/SortElementParserTemp.java b/core/src/main/java/org/elasticsearch/search/sort/SortElementParserTemp.java index 8893471b6c1..069f1380b49 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/SortElementParserTemp.java +++ b/core/src/main/java/org/elasticsearch/search/sort/SortElementParserTemp.java @@ -19,13 +19,12 @@ package org.elasticsearch.search.sort; -import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.index.query.QueryParseContext; import java.io.IOException; // TODO once sort refactoring is done this needs to be merged into SortBuilder -public interface SortElementParserTemp { +public interface SortElementParserTemp { /** * Creates a new SortBuilder from the json held by the {@link SortElementParserTemp} * in {@link org.elasticsearch.common.xcontent.XContent} format diff --git a/core/src/main/java/org/elasticsearch/search/sort/SortParseElement.java b/core/src/main/java/org/elasticsearch/search/sort/SortParseElement.java index a99158787d3..fe0b62022fe 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/SortParseElement.java +++ b/core/src/main/java/org/elasticsearch/search/sort/SortParseElement.java @@ -30,7 +30,6 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.core.LongFieldMapper; import org.elasticsearch.index.query.support.NestedInnerQueryParseSupport; import org.elasticsearch.search.MultiValueMode; import org.elasticsearch.search.SearchParseElement; @@ -55,7 +54,6 @@ public class SortParseElement implements SearchParseElement { private static final SortField SORT_DOC = new SortField(null, SortField.Type.DOC); private static final SortField SORT_DOC_REVERSE = new SortField(null, SortField.Type.DOC, true); - public static final ParseField IGNORE_UNMAPPED = new ParseField("ignore_unmapped"); public static final ParseField UNMAPPED_TYPE = new ParseField("unmapped_type"); public static final String SCORE_FIELD_NAME = "_score"; @@ -140,7 +138,7 @@ public class SortParseElement implements SearchParseElement { addSortField(context, sortFields, fieldName, reverse, unmappedType, missing, sortMode, nestedFilterParseHelper); } else { if (PARSERS.containsKey(fieldName)) { - sortFields.add(PARSERS.get(fieldName).parse(parser, context)); + sortFields.add(PARSERS.get(fieldName).parse(parser, context.getQueryShardContext())); } else { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { @@ -156,19 +154,13 @@ public class SortParseElement implements SearchParseElement { } } else if ("missing".equals(innerJsonName)) { missing = parser.textOrNull(); - } else if (context.parseFieldMatcher().match(innerJsonName, IGNORE_UNMAPPED)) { - // backward compatibility: ignore_unmapped has been replaced with unmapped_type - if (unmappedType == null // don't override if unmapped_type has been provided too - && parser.booleanValue()) { - unmappedType = LongFieldMapper.CONTENT_TYPE; - } } else if 
(context.parseFieldMatcher().match(innerJsonName, UNMAPPED_TYPE)) { unmappedType = parser.textOrNull(); } else if ("mode".equals(innerJsonName)) { sortMode = MultiValueMode.fromString(parser.text()); } else if ("nested_path".equals(innerJsonName) || "nestedPath".equals(innerJsonName)) { if (nestedFilterParseHelper == null) { - nestedFilterParseHelper = new NestedInnerQueryParseSupport(parser, context); + nestedFilterParseHelper = new NestedInnerQueryParseSupport(parser, context.getQueryShardContext()); } nestedFilterParseHelper.setPath(parser.text()); } else { @@ -177,7 +169,7 @@ public class SortParseElement implements SearchParseElement { } else if (token == XContentParser.Token.START_OBJECT) { if ("nested_filter".equals(innerJsonName) || "nestedFilter".equals(innerJsonName)) { if (nestedFilterParseHelper == null) { - nestedFilterParseHelper = new NestedInnerQueryParseSupport(parser, context); + nestedFilterParseHelper = new NestedInnerQueryParseSupport(parser, context.getQueryShardContext()); } nestedFilterParseHelper.filter(); } else { @@ -239,14 +231,13 @@ public class SortParseElement implements SearchParseElement { final Nested nested; if (nestedHelper != null && nestedHelper.getPath() != null) { BitSetProducer rootDocumentsFilter = context.bitsetFilterCache().getBitSetProducer(Queries.newNonNestedFilter()); - Query innerDocumentsFilter; + Query innerDocumentsQuery; if (nestedHelper.filterFound()) { - // TODO: use queries instead - innerDocumentsFilter = nestedHelper.getInnerFilter(); + innerDocumentsQuery = nestedHelper.getInnerFilter(); } else { - innerDocumentsFilter = nestedHelper.getNestedObjectMapper().nestedTypeFilter(); + innerDocumentsQuery = nestedHelper.getNestedObjectMapper().nestedTypeFilter(); } - nested = new Nested(rootDocumentsFilter, context.searcher().createNormalizedWeight(innerDocumentsFilter, false)); + nested = new Nested(rootDocumentsFilter, innerDocumentsQuery); } else { nested = null; } diff --git a/core/src/main/java/org/elasticsearch/search/sort/SortParser.java b/core/src/main/java/org/elasticsearch/search/sort/SortParser.java index 6383afd8845..727e576a85e 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/SortParser.java +++ b/core/src/main/java/org/elasticsearch/search/sort/SortParser.java @@ -21,7 +21,7 @@ package org.elasticsearch.search.sort; import org.apache.lucene.search.SortField; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.index.query.QueryShardContext; /** * @@ -30,5 +30,5 @@ public interface SortParser { String[] names(); - SortField parse(XContentParser parser, SearchContext context) throws Exception; + SortField parse(XContentParser parser, QueryShardContext context) throws Exception; } diff --git a/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java index c7f4392e56a..8b6f1198705 100644 --- a/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java +++ b/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java @@ -222,7 +222,7 @@ public class ThreadPool extends AbstractComponent implements Closeable { int halfProcMaxAt5 = Math.min(((availableProcessors + 1) / 2), 5); int halfProcMaxAt10 = Math.min(((availableProcessors + 1) / 2), 10); Map defaultExecutorTypeSettings = new HashMap<>(); - add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.GENERIC).keepAlive("30s")); + add(defaultExecutorTypeSettings, new 
ExecutorSettingsBuilder(Names.GENERIC).size(4 * availableProcessors).keepAlive("30s")); add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.INDEX).size(availableProcessors).queueSize(200)); add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.BULK).size(availableProcessors).queueSize(50)); add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.GET).size(availableProcessors).queueSize(1000)); diff --git a/core/src/main/java/org/elasticsearch/tribe/TribeService.java b/core/src/main/java/org/elasticsearch/tribe/TribeService.java index bf66cce1b9e..2bd40539807 100644 --- a/core/src/main/java/org/elasticsearch/tribe/TribeService.java +++ b/core/src/main/java/org/elasticsearch/tribe/TribeService.java @@ -41,6 +41,8 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -52,7 +54,10 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.node.Node; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.transport.TransportSettings; +import java.util.Arrays; import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; @@ -154,6 +159,15 @@ public class TribeService extends AbstractLifecycleComponent { public static final Set TRIBE_SETTING_KEYS = Sets.newHashSet(TRIBE_NAME_SETTING.getKey(), ON_CONFLICT_SETTING.getKey(), BLOCKS_METADATA_INDICES_SETTING.getKey(), BLOCKS_METADATA_SETTING.getKey(), BLOCKS_READ_INDICES_SETTING.getKey(), BLOCKS_WRITE_INDICES_SETTING.getKey(), BLOCKS_WRITE_SETTING.getKey()); + // these settings should be passed through to each tribe client, if they are not set explicitly + private static final List> PASS_THROUGH_SETTINGS = Arrays.asList( + NetworkService.GLOBAL_NETWORK_HOST_SETTING, + NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING, + NetworkService.GLOBAL_NETWORK_PUBLISHHOST_SETTING, + TransportSettings.HOST, + TransportSettings.BIND_HOST, + TransportSettings.PUBLISH_HOST + ); private final String onConflict; private final Set droppedIndices = ConcurrentCollections.newConcurrentSet(); @@ -167,18 +181,8 @@ public class TribeService extends AbstractLifecycleComponent { nodesSettings.remove("blocks"); // remove prefix settings that don't indicate a client nodesSettings.remove("on_conflict"); // remove prefix settings that don't indicate a client for (Map.Entry entry : nodesSettings.entrySet()) { - Settings.Builder sb = Settings.builder().put(entry.getValue()); - sb.put("node.name", settings.get("node.name") + "/" + entry.getKey()); - sb.put(Environment.PATH_HOME_SETTING.getKey(), Environment.PATH_HOME_SETTING.get(settings)); // pass through ES home dir - if (Environment.PATH_CONF_SETTING.exists(settings)) { - sb.put(Environment.PATH_CONF_SETTING.getKey(), Environment.PATH_CONF_SETTING.get(settings)); - } - sb.put(TRIBE_NAME_SETTING.getKey(), entry.getKey()); - if (sb.get("http.enabled") == null) { - sb.put("http.enabled", false); - } - sb.put(Node.NODE_CLIENT_SETTING.getKey(), true); - nodes.add(new TribeClientNode(sb.build())); + Settings clientSettings = 
buildClientSettings(entry.getKey(), settings, entry.getValue()); + nodes.add(new TribeClientNode(clientSettings)); } this.blockIndicesMetadata = BLOCKS_METADATA_INDICES_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY); @@ -197,6 +201,46 @@ public class TribeService extends AbstractLifecycleComponent { this.onConflict = ON_CONFLICT_SETTING.get(settings); } + // pkg private for testing + /** + * Builds node settings for a tribe client node from the tribe node's global settings, + * combined with tribe specific settings. + */ + static Settings buildClientSettings(String tribeName, Settings globalSettings, Settings tribeSettings) { + for (String tribeKey : tribeSettings.getAsMap().keySet()) { + if (tribeKey.startsWith("path.")) { + throw new IllegalArgumentException("Setting [" + tribeKey + "] not allowed in tribe client [" + tribeName + "]"); + } + } + Settings.Builder sb = Settings.builder().put(tribeSettings); + sb.put("node.name", globalSettings.get("node.name") + "/" + tribeName); + sb.put(Environment.PATH_HOME_SETTING.getKey(), Environment.PATH_HOME_SETTING.get(globalSettings)); // pass through ES home dir + if (Environment.PATH_CONF_SETTING.exists(globalSettings)) { + sb.put(Environment.PATH_CONF_SETTING.getKey(), Environment.PATH_CONF_SETTING.get(globalSettings)); + } + if (Environment.PATH_PLUGINS_SETTING.exists(globalSettings)) { + sb.put(Environment.PATH_PLUGINS_SETTING.getKey(), Environment.PATH_PLUGINS_SETTING.get(globalSettings)); + } + if (Environment.PATH_LOGS_SETTING.exists(globalSettings)) { + sb.put(Environment.PATH_LOGS_SETTING.getKey(), Environment.PATH_LOGS_SETTING.get(globalSettings)); + } + if (Environment.PATH_SCRIPTS_SETTING.exists(globalSettings)) { + sb.put(Environment.PATH_SCRIPTS_SETTING.getKey(), Environment.PATH_SCRIPTS_SETTING.get(globalSettings)); + } + for (Setting passthrough : PASS_THROUGH_SETTINGS) { + if (passthrough.exists(tribeSettings) == false && passthrough.exists(globalSettings)) { + sb.put(passthrough.getKey(), globalSettings.get(passthrough.getKey())); + } + } + sb.put(TRIBE_NAME_SETTING.getKey(), tribeName); + if (sb.get(NetworkModule.HTTP_ENABLED.getKey()) == null) { + sb.put(NetworkModule.HTTP_ENABLED.getKey(), false); + } + sb.put(Node.NODE_CLIENT_SETTING.getKey(), true); + return sb.build(); + } + + @Override protected void doStart() { if (nodes.isEmpty() == false) { diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java index 5109ab979cf..586f178d12d 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java @@ -237,8 +237,8 @@ public class CancellableTasksTests extends TaskManagerTestCase { // Cancel main task CancelTasksRequest request = new CancelTasksRequest(); - request.reason("Testing Cancellation"); - request.taskId(new TaskId(testNodes[0].discoveryNode.getId(), mainTask.getId())); + request.setReason("Testing Cancellation"); + request.setTaskId(new TaskId(testNodes[0].discoveryNode.getId(), mainTask.getId())); // And send the cancellation request to a random node CancelTasksResponse response = testNodes[randomIntBetween(0, testNodes.length - 1)].transportCancelTasksAction.execute(request) .get(); @@ -270,7 +270,7 @@ public class CancellableTasksTests extends TaskManagerTestCase { // Make sure that tasks are no longer running 
ListTasksResponse listTasksResponse = testNodes[randomIntBetween(0, testNodes.length - 1)] - .transportListTasksAction.execute(new ListTasksRequest().taskId( + .transportListTasksAction.execute(new ListTasksRequest().setTaskId( new TaskId(testNodes[0].discoveryNode.getId(), mainTask.getId()))).get(); assertEquals(0, listTasksResponse.getTasks().size()); @@ -313,7 +313,7 @@ public class CancellableTasksTests extends TaskManagerTestCase { // Make sure that tasks are running ListTasksResponse listTasksResponse = testNodes[randomIntBetween(0, testNodes.length - 1)] - .transportListTasksAction.execute(new ListTasksRequest().parentTaskId(new TaskId(mainNode, mainTask.getId()))).get(); + .transportListTasksAction.execute(new ListTasksRequest().setParentTaskId(new TaskId(mainNode, mainTask.getId()))).get(); assertThat(listTasksResponse.getTasks().size(), greaterThanOrEqualTo(blockOnNodes.size())); // Simulate the coordinating node leaving the cluster @@ -331,8 +331,8 @@ public class CancellableTasksTests extends TaskManagerTestCase { logger.info("--> Simulate issuing cancel request on the node that is about to leave the cluster"); // Simulate issuing cancel request on the node that is about to leave the cluster CancelTasksRequest request = new CancelTasksRequest(); - request.reason("Testing Cancellation"); - request.taskId(new TaskId(testNodes[0].discoveryNode.getId(), mainTask.getId())); + request.setReason("Testing Cancellation"); + request.setTaskId(new TaskId(testNodes[0].discoveryNode.getId(), mainTask.getId())); // And send the cancellation request to a random node CancelTasksResponse response = testNodes[0].transportCancelTasksAction.execute(request).get(); logger.info("--> Done simulating issuing cancel request on the node that is about to leave the cluster"); @@ -356,7 +356,7 @@ public class CancellableTasksTests extends TaskManagerTestCase { // Make sure that tasks are no longer running try { ListTasksResponse listTasksResponse1 = testNodes[randomIntBetween(1, testNodes.length - 1)] - .transportListTasksAction.execute(new ListTasksRequest().taskId(new TaskId(mainNode, mainTask.getId()))).get(); + .transportListTasksAction.execute(new ListTasksRequest().setTaskId(new TaskId(mainNode, mainTask.getId()))).get(); assertEquals(0, listTasksResponse1.getTasks().size()); } catch (InterruptedException ex) { Thread.currentThread().interrupt(); diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java index eaa3caf9084..8c791a99018 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.action.admin.cluster.node.tasks; +import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.ListenableActionFuture; import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; @@ -40,6 +42,7 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.tasks.MockTaskManager; import org.elasticsearch.test.tasks.MockTaskManagerListener; import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.transport.ReceiveTimeoutTransportException; import java.io.IOException; import java.util.ArrayList; @@ -54,8 
+57,11 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.ReentrantLock; import java.util.function.Function; +import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; +import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.emptyCollectionOf; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.not; @@ -327,6 +333,78 @@ public class TasksIT extends ESIntegTestCase { assertEquals(0, client().admin().cluster().prepareListTasks().setActions(TestTaskPlugin.TestTaskAction.NAME + "[n]").get().getTasks().size()); } + public void testTasksListWaitForCompletion() throws Exception { + // Start blocking test task + ListenableActionFuture future = TestTaskPlugin.TestTaskAction.INSTANCE.newRequestBuilder(client()) + .execute(); + + ListenableActionFuture waitResponseFuture; + try { + // Wait for the task to start on all nodes + assertBusy(() -> assertEquals(internalCluster().numDataAndMasterNodes(), + client().admin().cluster().prepareListTasks().setActions(TestTaskPlugin.TestTaskAction.NAME + "[n]").get().getTasks().size())); + + // Spin up a request to wait for that task to finish + waitResponseFuture = client().admin().cluster().prepareListTasks() + .setActions(TestTaskPlugin.TestTaskAction.NAME + "[n]").setWaitForCompletion(true).execute(); + } finally { + // Unblock the request so the wait for completion request can finish + TestTaskPlugin.UnblockTestTasksAction.INSTANCE.newRequestBuilder(client()).get(); + } + + // Now that the task is unblocked the list response will come back + ListTasksResponse waitResponse = waitResponseFuture.get(); + // If any tasks come back then they are the tasks we asked for - it'd be super weird if this wasn't true + for (TaskInfo task: waitResponse.getTasks()) { + assertEquals(task.getAction(), TestTaskPlugin.TestTaskAction.NAME + "[n]"); + } + // See the next test to cover the timeout case + + future.get(); + } + + public void testTasksListWaitForTimeout() throws Exception { + // Start blocking test task + ListenableActionFuture future = TestTaskPlugin.TestTaskAction.INSTANCE.newRequestBuilder(client()) + .execute(); + try { + // Wait for the task to start on all nodes + assertBusy(() -> assertEquals(internalCluster().numDataAndMasterNodes(), + client().admin().cluster().prepareListTasks().setActions(TestTaskPlugin.TestTaskAction.NAME + "[n]").get().getTasks().size())); + + // Spin up a request that should wait for those tasks to finish + // It will timeout because we haven't unblocked the tasks + ListTasksResponse waitResponse = client().admin().cluster().prepareListTasks() + .setActions(TestTaskPlugin.TestTaskAction.NAME + "[n]").setWaitForCompletion(true).setTimeout(timeValueMillis(100)) + .get(); + + assertFalse(waitResponse.getNodeFailures().isEmpty()); + for (FailedNodeException failure : waitResponse.getNodeFailures()) { + Throwable timeoutException = failure.getCause(); + // The exception sometimes comes back wrapped depending on the client + if (timeoutException.getCause() != null) { + timeoutException = timeoutException.getCause(); + } + assertThat(timeoutException, + either(instanceOf(ElasticsearchTimeoutException.class)).or(instanceOf(ReceiveTimeoutTransportException.class))); + } + } finally { + // Now we can unblock those requests + TestTaskPlugin.UnblockTestTasksAction.INSTANCE.newRequestBuilder(client()).get(); + } + future.get(); + } + + public 
void testTasksListWaitForNoTask() throws Exception { + // Spin up a request to wait for no matching tasks + ListenableActionFuture waitResponseFuture = client().admin().cluster().prepareListTasks() + .setActions(TestTaskPlugin.TestTaskAction.NAME + "[n]").setWaitForCompletion(true).setTimeout(timeValueMillis(10)) + .execute(); + + // It should finish quickly and without complaint + assertThat(waitResponseFuture.get().getTasks(), emptyCollectionOf(TaskInfo.class)); + } + @Override public void tearDown() throws Exception { for (Map.Entry, RecordingTaskManagerListener> entry : listeners.entrySet()) { diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java index 0d4372a51eb..e8dcd228e50 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java @@ -345,7 +345,10 @@ public class TestTaskPlugin extends Plugin { public static class UnblockTestTasksRequest extends BaseTasksRequest { - + @Override + public boolean match(Task task) { + return task instanceof TestTask && super.match(task); + } } public static class UnblockTestTasksResponse extends BaseTasksResponse { diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java index e1501f9b14c..556eee238fd 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java @@ -355,7 +355,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase { int testNodeNum = randomIntBetween(0, testNodes.length - 1); TestNode testNode = testNodes[testNodeNum]; ListTasksRequest listTasksRequest = new ListTasksRequest(); - listTasksRequest.actions("testAction*"); // pick all test actions + listTasksRequest.setActions("testAction*"); // pick all test actions logger.info("Listing currently running tasks using node [{}]", testNodeNum); ListTasksResponse response = testNode.transportListTasksAction.execute(listTasksRequest).get(); logger.info("Checking currently running tasks"); @@ -371,7 +371,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase { // Check task counts using transport with filtering testNode = testNodes[randomIntBetween(0, testNodes.length - 1)]; listTasksRequest = new ListTasksRequest(); - listTasksRequest.actions("testAction[n]"); // only pick node actions + listTasksRequest.setActions("testAction[n]"); // only pick node actions response = testNode.transportListTasksAction.execute(listTasksRequest).get(); assertEquals(testNodes.length, response.getPerNodeTasks().size()); for (Map.Entry> entry : response.getPerNodeTasks().entrySet()) { @@ -380,7 +380,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase { } // Check task counts using transport with detailed description - listTasksRequest.detailed(true); // same request only with detailed description + listTasksRequest.setDetailed(true); // same request only with detailed description response = testNode.transportListTasksAction.execute(listTasksRequest).get(); assertEquals(testNodes.length, response.getPerNodeTasks().size()); for (Map.Entry> entry : 
response.getPerNodeTasks().entrySet()) { @@ -389,7 +389,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase { } // Make sure that the main task on coordinating node is the task that was returned to us by execute() - listTasksRequest.actions("testAction"); // only pick the main task + listTasksRequest.setActions("testAction"); // only pick the main task response = testNode.transportListTasksAction.execute(listTasksRequest).get(); assertEquals(1, response.getTasks().size()); assertEquals(mainTask.getId(), response.getTasks().get(0).getId()); @@ -417,7 +417,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase { // Get the parent task ListTasksRequest listTasksRequest = new ListTasksRequest(); - listTasksRequest.actions("testAction"); + listTasksRequest.setActions("testAction"); ListTasksResponse response = testNode.transportListTasksAction.execute(listTasksRequest).get(); assertEquals(1, response.getTasks().size()); String parentNode = response.getTasks().get(0).getNode().getId(); @@ -425,7 +425,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase { // Find tasks with common parent listTasksRequest = new ListTasksRequest(); - listTasksRequest.parentTaskId(new TaskId(parentNode, parentTaskId)); + listTasksRequest.setParentTaskId(new TaskId(parentNode, parentTaskId)); response = testNode.transportListTasksAction.execute(listTasksRequest).get(); assertEquals(testNodes.length, response.getTasks().size()); for (TaskInfo task : response.getTasks()) { @@ -451,7 +451,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase { // Get the parent task ListTasksRequest listTasksRequest = new ListTasksRequest(); - listTasksRequest.actions("testAction*"); + listTasksRequest.setActions("testAction*"); ListTasksResponse response = testNode.transportListTasksAction.execute(listTasksRequest).get(); assertEquals(0, response.getTasks().size()); @@ -472,7 +472,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase { // Check task counts using transport with filtering TestNode testNode = testNodes[randomIntBetween(0, testNodes.length - 1)]; ListTasksRequest listTasksRequest = new ListTasksRequest(); - listTasksRequest.actions("testAction[n]"); // only pick node actions + listTasksRequest.setActions("testAction[n]"); // only pick node actions ListTasksResponse response = testNode.transportListTasksAction.execute(listTasksRequest).get(); assertEquals(testNodes.length, response.getPerNodeTasks().size()); for (Map.Entry> entry : response.getPerNodeTasks().entrySet()) { @@ -482,7 +482,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase { // Check task counts using transport with detailed description long minimalDurationNanos = System.nanoTime() - maximumStartTimeNanos; - listTasksRequest.detailed(true); // same request only with detailed description + listTasksRequest.setDetailed(true); // same request only with detailed description response = testNode.transportListTasksAction.execute(listTasksRequest).get(); assertEquals(testNodes.length, response.getPerNodeTasks().size()); for (Map.Entry> entry : response.getPerNodeTasks().entrySet()) { @@ -518,9 +518,9 @@ public class TransportTasksActionTests extends TaskManagerTestCase { // Try to cancel main task using action name CancelTasksRequest request = new CancelTasksRequest(); - request.nodesIds(testNodes[0].discoveryNode.getId()); - request.reason("Testing Cancellation"); - request.actions(actionName); + request.setNodesIds(testNodes[0].discoveryNode.getId()); + 
request.setReason("Testing Cancellation"); + request.setActions(actionName); CancelTasksResponse response = testNodes[randomIntBetween(0, testNodes.length - 1)].transportCancelTasksAction.execute(request) .get(); @@ -532,8 +532,8 @@ public class TransportTasksActionTests extends TaskManagerTestCase { // Try to cancel main task using id request = new CancelTasksRequest(); - request.reason("Testing Cancellation"); - request.taskId(new TaskId(testNodes[0].discoveryNode.getId(), task.getId())); + request.setReason("Testing Cancellation"); + request.setTaskId(new TaskId(testNodes[0].discoveryNode.getId(), task.getId())); response = testNodes[randomIntBetween(0, testNodes.length - 1)].transportCancelTasksAction.execute(request).get(); // Shouldn't match any tasks since testAction doesn't support cancellation @@ -544,7 +544,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase { // Make sure that task is still running ListTasksRequest listTasksRequest = new ListTasksRequest(); - listTasksRequest.actions(actionName); + listTasksRequest.setActions(actionName); ListTasksResponse listResponse = testNodes[randomIntBetween(0, testNodes.length - 1)].transportListTasksAction.execute (listTasksRequest).get(); assertEquals(1, listResponse.getPerNodeTasks().size()); @@ -617,7 +617,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase { // Run task action on node tasks that are currently running // should be successful on all nodes except one TestTasksRequest testTasksRequest = new TestTasksRequest(); - testTasksRequest.actions("testAction[n]"); // pick all test actions + testTasksRequest.setActions("testAction[n]"); // pick all test actions TestTasksResponse response = tasksActions[0].execute(testTasksRequest).get(); // Get successful responses from all nodes except one assertEquals(testNodes.length - 1, response.tasks.size()); diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java index 2e39c39cfd2..c31993ebb81 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java @@ -32,6 +32,7 @@ import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.common.collect.ImmutableOpenIntMap; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; @@ -157,6 +158,7 @@ public class IndicesShardStoreRequestIT extends ESIntegTestCase { .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "5") .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) )); + indexRandomData(index); ensureGreen(index); @@ -165,9 +167,10 @@ public class IndicesShardStoreRequestIT extends ESIntegTestCase { logger.info("--> corrupt random shard copies"); Map> corruptedShardIDMap = new HashMap<>(); + Index idx = resolveIndex(index); for (String node : internalCluster().nodesInclude(index)) { IndicesService indexServices = internalCluster().getInstance(IndicesService.class, node); - IndexService indexShards = indexServices.indexServiceSafe(index); + IndexService indexShards = indexServices.indexServiceSafe(idx); for (Integer shardId : 
indexShards.shardIds()) { IndexShard shard = indexShards.getShard(shardId); if (randomBoolean()) { diff --git a/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java index cf7b6745c8e..462a44e08b4 100644 --- a/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java @@ -113,7 +113,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase { @Override protected ShardIterator shards(ClusterState clusterState, Request request) { - return clusterState.routingTable().index(request.concreteIndex()).shard(request.shardId).primaryShardIt(); + return clusterState.routingTable().index(request.concreteIndex()).shard(request.shardId.getId()).primaryShardIt(); } } @@ -178,7 +178,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase { public void testBasicRequestWorks() throws InterruptedException, ExecutionException, TimeoutException { Request request = new Request().index("test"); - request.shardId = 0; + request.shardId = new ShardId("test", "_na_", 0); PlainActionFuture listener = new PlainActionFuture<>(); clusterService.setState(ClusterStateCreationUtils.state("test", randomBoolean(), ShardRoutingState.STARTED)); action.new AsyncSingleAction(request, listener).start(); @@ -189,7 +189,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase { public void testFailureWithoutRetry() throws Exception { Request request = new Request().index("test"); - request.shardId = 0; + request.shardId = new ShardId("test", "_na_", 0); PlainActionFuture listener = new PlainActionFuture<>(); clusterService.setState(ClusterStateCreationUtils.state("test", randomBoolean(), ShardRoutingState.STARTED)); @@ -215,7 +215,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase { public void testSuccessAfterRetryWithClusterStateUpdate() throws Exception { Request request = new Request().index("test"); - request.shardId = 0; + request.shardId = new ShardId("test", "_na_", 0); PlainActionFuture listener = new PlainActionFuture<>(); boolean local = randomBoolean(); clusterService.setState(ClusterStateCreationUtils.state("test", local, ShardRoutingState.INITIALIZING)); @@ -231,7 +231,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase { public void testSuccessAfterRetryWithExceptionFromTransport() throws Exception { Request request = new Request().index("test"); - request.shardId = 0; + request.shardId = new ShardId("test", "_na_", 0); PlainActionFuture listener = new PlainActionFuture<>(); boolean local = randomBoolean(); clusterService.setState(ClusterStateCreationUtils.state("test", local, ShardRoutingState.STARTED)); @@ -250,7 +250,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase { public void testRetryOfAnAlreadyTimedOutRequest() throws Exception { Request request = new Request().index("test").timeout(new TimeValue(0, TimeUnit.MILLISECONDS)); - request.shardId = 0; + request.shardId = new ShardId("test", "_na_", 0); PlainActionFuture listener = new PlainActionFuture<>(); clusterService.setState(ClusterStateCreationUtils.state("test", randomBoolean(), ShardRoutingState.STARTED)); action.new 
AsyncSingleAction(request, listener).start(); @@ -299,7 +299,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase { } }; Request request = new Request().index("test"); - request.shardId = 0; + request.shardId = new ShardId("test", "_na_", 0); PlainActionFuture listener = new PlainActionFuture<>(); clusterService.setState(ClusterStateCreationUtils.state("test", randomBoolean(), ShardRoutingState.STARTED)); action.new AsyncSingleAction(request, listener).start(); diff --git a/core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java index 45986eab00e..3c269c39004 100644 --- a/core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java +++ b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java @@ -131,7 +131,7 @@ public class BootstrapCheckTests extends ESTestCase { } public void testMaxNumberOfThreadsCheck() { - final int limit = 1 << 15; + final int limit = 1 << 11; final AtomicLong maxNumberOfThreads = new AtomicLong(randomIntBetween(1, limit - 1)); final BootstrapCheck.MaxNumberOfThreadsCheck check = new BootstrapCheck.MaxNumberOfThreadsCheck() { @Override diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java b/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java index cefd3a6703a..fc43f4154d1 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.index.Index; import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; @@ -37,6 +38,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; import static org.hamcrest.Matchers.equalTo; @@ -220,7 +222,7 @@ public class ClusterChangedEventTests extends ESTestCase { final ClusterState newState = nextState(previousState, changeClusterUUID, addedIndices, delIndices, 0); final ClusterChangedEvent event = new ClusterChangedEvent("_na_", newState, previousState); final List addsFromEvent = event.indicesCreated(); - final List delsFromEvent = event.indicesDeleted(); + final List delsFromEvent = event.indicesDeleted().stream().map((s) -> s.getName()).collect(Collectors.toList()); Collections.sort(addsFromEvent); Collections.sort(delsFromEvent); assertThat(addsFromEvent, equalTo(addedIndices)); diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java index d9cf9f0d790..744477d6722 100644 --- a/core/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java @@ -47,6 +47,8 @@ public class WildcardExpressionResolverTests extends ESTestCase { assertThat(newHashSet(resolver.resolve(context, Arrays.asList("test*"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testX*"))), equalTo(newHashSet("testXXX", "testXYY"))); assertThat(newHashSet(resolver.resolve(context, 
Arrays.asList("testX*", "kuku"))), equalTo(newHashSet("testXXX", "testXYY", "kuku"))); + assertThat(newHashSet(resolver.resolve(context, Arrays.asList("*"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY", "kuku"))); + assertThat(newHashSet(resolver.resolve(context, Arrays.asList("*", "-kuku"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); } public void testConvertWildcardsTests() { @@ -107,6 +109,18 @@ public class WildcardExpressionResolverTests extends ESTestCase { assertThat(newHashSet(resolver.resolve(context, Arrays.asList("*Y*X"))).size(), equalTo(0)); } + public void testAll() { + MetaData.Builder mdBuilder = MetaData.builder() + .put(indexBuilder("testXXX")) + .put(indexBuilder("testXYY")) + .put(indexBuilder("testYYY")); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); + IndexNameExpressionResolver.WildcardExpressionResolver resolver = new IndexNameExpressionResolver.WildcardExpressionResolver(); + + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen()); + assertThat(newHashSet(resolver.resolve(context, Arrays.asList("_all"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); + } + private IndexMetaData.Builder indexBuilder(String index) { return IndexMetaData.builder(index).settings(settings(Version.CURRENT).put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); } diff --git a/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java b/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java index 6cc9912924d..a190de5b702 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java @@ -216,6 +216,13 @@ public class ScopedSettingsTests extends ESTestCase { } catch (IllegalArgumentException e) { assertEquals("Failed to parse value [true] for setting [index.number_of_replicas]", e.getMessage()); } + + try { + settings.validate("index.similarity.classic.type", Settings.builder().put("index.similarity.classic.type", "mine").build()); + fail(); + } catch (IllegalArgumentException e) { + assertEquals("illegal value for [index.similarity.classic] cannot redefine built-in similarity", e.getMessage()); + } } diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java index b9d7107ed54..3948a4bab90 100644 --- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java @@ -177,13 +177,17 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { } private void configureUnicastCluster(int numberOfNodes, @Nullable int[] unicastHostsOrdinals, int minimumMasterNode) throws ExecutionException, InterruptedException { + configureUnicastCluster(DEFAULT_SETTINGS, numberOfNodes, unicastHostsOrdinals, minimumMasterNode); + } + + private void configureUnicastCluster(Settings settings, int numberOfNodes, @Nullable int[] unicastHostsOrdinals, int minimumMasterNode) throws ExecutionException, InterruptedException { if (minimumMasterNode < 0) { minimumMasterNode = numberOfNodes / 2 + 1; } logger.info("---> configured unicast"); // TODO: Rarely use default settings form some of these Settings nodeSettings = 
Settings.builder() - .put(DEFAULT_SETTINGS) + .put(settings) .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), minimumMasterNode) .build(); @@ -196,7 +200,6 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { } } - /** * Test that no split brain occurs under partial network partition. See https://github.com/elastic/elasticsearch/issues/2488 */ @@ -1075,25 +1078,40 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { * Tests that indices are properly deleted even if there is a master transition in between. * Test for https://github.com/elastic/elasticsearch/issues/11665 */ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/16890") public void testIndicesDeleted() throws Exception { - configureUnicastCluster(3, null, 2); + final Settings settings = Settings.builder() + .put(DEFAULT_SETTINGS) + .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "0s") // don't wait on isolated data node + .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "30s") // wait till cluster state is committed + .build(); + final String idxName = "test"; + configureUnicastCluster(settings, 3, null, 2); InternalTestCluster.Async> masterNodes = internalCluster().startMasterOnlyNodesAsync(2); InternalTestCluster.Async dataNode = internalCluster().startDataOnlyNodeAsync(); dataNode.get(); - masterNodes.get(); + final List allMasterEligibleNodes = masterNodes.get(); ensureStableCluster(3); assertAcked(prepareCreate("test")); ensureYellow(); - String masterNode1 = internalCluster().getMasterName(); + final String masterNode1 = internalCluster().getMasterName(); NetworkPartition networkPartition = new NetworkUnresponsivePartition(masterNode1, dataNode.get(), getRandom()); internalCluster().setDisruptionScheme(networkPartition); networkPartition.startDisrupting(); - internalCluster().client(masterNode1).admin().indices().prepareDelete("test").setTimeout("1s").get(); + // We know this will time out due to the partition, we check manually below to not proceed until + // the delete has been applied to the master node and the master eligible node. + internalCluster().client(masterNode1).admin().indices().prepareDelete(idxName).setTimeout("0s").get(); + // Don't restart the master node until we know the index deletion has taken effect on master and the master eligible node. 
+ assertBusy(() -> { + for (String masterNode : allMasterEligibleNodes) { + final ClusterState masterState = internalCluster().clusterService(masterNode).state(); + assertTrue("index not deleted on " + masterNode, masterState.metaData().hasIndex(idxName) == false && + masterState.status() == ClusterState.ClusterStateStatus.APPLIED); + } + }); internalCluster().restartNode(masterNode1, InternalTestCluster.EMPTY_CALLBACK); ensureYellow(); - assertFalse(client().admin().indices().prepareExists("test").get().isExists()); + assertFalse(client().admin().indices().prepareExists(idxName).get().isExists()); } protected NetworkPartition addRandomPartition() { diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingIT.java b/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingIT.java index b247dad069e..88d375699a1 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingIT.java @@ -33,7 +33,6 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.discovery.zen.ping.PingContextProvider; import org.elasticsearch.discovery.zen.ping.ZenPing; -import org.elasticsearch.node.service.NodeService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -82,11 +81,6 @@ public class UnicastZenPingIT extends ESTestCase { return DiscoveryNodes.builder().put(nodeA).localNodeId("UZP_A").build(); } - @Override - public NodeService nodeService() { - return null; - } - @Override public boolean nodeHasJoinedClusterOnce() { return false; @@ -101,11 +95,6 @@ public class UnicastZenPingIT extends ESTestCase { return DiscoveryNodes.builder().put(nodeB).localNodeId("UZP_B").build(); } - @Override - public NodeService nodeService() { - return null; - } - @Override public boolean nodeHasJoinedClusterOnce() { return true; diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java index 224ecbdf619..7e31f6055de 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java @@ -43,7 +43,6 @@ import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.DiscoveryNodesProvider; import org.elasticsearch.node.Node; -import org.elasticsearch.node.service.NodeService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; @@ -134,11 +133,6 @@ public class PublishClusterStateActionTests extends ESTestCase { return clusterState.nodes(); } - @Override - public NodeService nodeService() { - assert false; - throw new UnsupportedOperationException("Shouldn't be here"); - } } public MockNode createMockNode(final String name) throws Exception { diff --git a/core/src/test/java/org/elasticsearch/index/IndexTests.java b/core/src/test/java/org/elasticsearch/index/IndexTests.java new file mode 100644 index 00000000000..6ce38c6acba --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/IndexTests.java @@ -0,0 +1,44 
@@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.common.Strings; +import org.elasticsearch.test.ESTestCase; + +import static org.apache.lucene.util.TestUtil.randomSimpleString; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.not; + +public class IndexTests extends ESTestCase { + public void testToString() { + assertEquals("[name/uuid]", new Index("name", "uuid").toString()); + assertEquals("[name]", new Index("name", ClusterState.UNKNOWN_UUID).toString()); + + Index random = new Index(randomSimpleString(random(), 1, 100), + usually() ? Strings.randomBase64UUID(random()) : ClusterState.UNKNOWN_UUID); + assertThat(random.toString(), containsString(random.getName())); + if (ClusterState.UNKNOWN_UUID.equals(random.getUUID())) { + assertThat(random.toString(), not(containsString(random.getUUID()))); + } else { + assertThat(random.toString(), containsString(random.getUUID())); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java b/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java index a7d127a60c8..aa3da8fc840 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java +++ b/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java @@ -156,10 +156,11 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase { assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); ensureGreen(); refresh(); - + Index index = resolveIndex("foo-copy"); for (IndicesService service : internalCluster().getDataNodeInstances(IndicesService.class)) { - if (service.hasIndex("foo-copy")) { - IndexShard shard = service.indexServiceSafe("foo-copy").getShardOrNull(0); + + if (service.hasIndex(index)) { + IndexShard shard = service.indexServiceSafe(index).getShardOrNull(0); if (shard.routingEntry().primary()) { assertFalse(shard instanceof ShadowIndexShard); } else { @@ -201,8 +202,9 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase { IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats(IDX).clear().setTranslog(true).get(); assertEquals(2, indicesStatsResponse.getIndex(IDX).getPrimaries().getTranslog().estimatedNumberOfOperations()); assertEquals(2, indicesStatsResponse.getIndex(IDX).getTotal().getTranslog().estimatedNumberOfOperations()); + Index index = resolveIndex(IDX); for (IndicesService service : internalCluster().getInstances(IndicesService.class)) { - IndexService indexService = service.indexService(IDX); + IndexService indexService = service.indexService(index); if (indexService != null) { IndexShard shard = 
indexService.getShard(0); TranslogStats translogStats = shard.translogStats(); diff --git a/core/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java b/core/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java index e3676366511..9e05122322a 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java +++ b/core/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java @@ -36,24 +36,30 @@ import java.io.IOException; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.startsWith; public class IndexingSlowLogTests extends ESTestCase { public void testSlowLogParsedDocumentPrinterSourceToLog() throws IOException { BytesReference source = JsonXContent.contentBuilder().startObject().field("foo", "bar").endObject().bytes(); ParsedDocument pd = new ParsedDocument(new StringField("uid", "test:id", Store.YES), new LegacyIntField("version", 1, Store.YES), "id", "test", null, 0, -1, null, source, null); - + Index index = new Index("foo", "123"); // Turning off document logging doesn't log source[] - SlowLogParsedDocumentPrinter p = new SlowLogParsedDocumentPrinter(pd, 10, true, 0); + SlowLogParsedDocumentPrinter p = new SlowLogParsedDocumentPrinter(index, pd, 10, true, 0); assertThat(p.toString(), not(containsString("source["))); // Turning on document logging logs the whole thing - p = new SlowLogParsedDocumentPrinter(pd, 10, true, Integer.MAX_VALUE); + p = new SlowLogParsedDocumentPrinter(index, pd, 10, true, Integer.MAX_VALUE); assertThat(p.toString(), containsString("source[{\"foo\":\"bar\"}]")); // And you can truncate the source - p = new SlowLogParsedDocumentPrinter(pd, 10, true, 3); + p = new SlowLogParsedDocumentPrinter(index, pd, 10, true, 3); assertThat(p.toString(), containsString("source[{\"f]")); + + // And the index name and UUID are prefixed to the log message + p = new SlowLogParsedDocumentPrinter(index, pd, 10, true, 3); + assertThat(p.toString(), containsString("source[{\"f]")); + assertThat(p.toString(), startsWith("[foo/123] took")); } public void testReformatSetting() { diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java index 6f8b5a45df0..66487c54bf2 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java @@ -168,7 +168,7 @@ public abstract class AbstractFieldDataTestCase extends ESSingleNodeTestCase { protected Nested createNested(IndexSearcher searcher, Query parentFilter, Query childFilter) throws IOException { BitsetFilterCache s = indexService.cache().bitsetFilterCache(); - return new Nested(s.getBitSetProducer(parentFilter), searcher.createNormalizedWeight(childFilter, false)); + return new Nested(s.getBitSetProducer(parentFilter), childFilter); } public void testEmpty() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/index/query/AbstractQueryTestCase.java b/core/src/test/java/org/elasticsearch/index/query/AbstractQueryTestCase.java index 53594f43080..4d2ffcdd11e 100644 --- a/core/src/test/java/org/elasticsearch/index/query/AbstractQueryTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/query/AbstractQueryTestCase.java @@ -140,6 +140,7 @@ public abstract class AbstractQueryTestCase> protected static final String DATE_FIELD_NAME = "mapped_date"; protected static final String
OBJECT_FIELD_NAME = "mapped_object"; protected static final String GEO_POINT_FIELD_NAME = "mapped_geo_point"; + protected static final String GEO_POINT_FIELD_MAPPING = "type=geo_point,lat_lon=true,geohash=true,geohash_prefix=true"; protected static final String GEO_SHAPE_FIELD_NAME = "mapped_geo_shape"; protected static final String[] MAPPED_FIELD_NAMES = new String[] { STRING_FIELD_NAME, INT_FIELD_NAME, DOUBLE_FIELD_NAME, BOOLEAN_FIELD_NAME, DATE_FIELD_NAME, OBJECT_FIELD_NAME, GEO_POINT_FIELD_NAME, GEO_SHAPE_FIELD_NAME }; @@ -300,7 +301,7 @@ public abstract class AbstractQueryTestCase> BOOLEAN_FIELD_NAME, "type=boolean", DATE_FIELD_NAME, "type=date", OBJECT_FIELD_NAME, "type=object", - GEO_POINT_FIELD_NAME, "type=geo_point,lat_lon=true,geohash=true,geohash_prefix=true", + GEO_POINT_FIELD_NAME, GEO_POINT_FIELD_MAPPING, GEO_SHAPE_FIELD_NAME, "type=geo_shape" ).string()), MapperService.MergeReason.MAPPING_UPDATE, false); // also add mappings for two inner field in the object field diff --git a/core/src/test/java/org/elasticsearch/index/query/GeoDistanceRangeQueryTests.java b/core/src/test/java/org/elasticsearch/index/query/GeoDistanceRangeQueryTests.java index f07e695a1a0..cb0c374c5c0 100644 --- a/core/src/test/java/org/elasticsearch/index/query/GeoDistanceRangeQueryTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/GeoDistanceRangeQueryTests.java @@ -24,10 +24,12 @@ import org.apache.lucene.spatial.geopoint.search.GeoPointDistanceRangeQuery; import org.apache.lucene.spatial.util.GeoDistanceUtils; import org.apache.lucene.util.NumericUtils; import org.elasticsearch.Version; +import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.geo.GeoDistance; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.unit.DistanceUnit; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.search.geo.GeoDistanceRangeQuery; import org.elasticsearch.test.geo.RandomGeoGenerator; @@ -296,6 +298,36 @@ public class GeoDistanceRangeQueryTests extends AbstractQueryTestCase assertThat(query, instanceOf(TermQuery.class)); TermQuery termQuery = (TermQuery) query; Term term = termQuery.getTerm(); - assertThat(term.field(), equalTo(queryBuilder.fieldName() + GeoPointFieldMapper.Names.GEOHASH_SUFFIX)); + assertThat(term.field(), equalTo(queryBuilder.fieldName() + "." 
+ GeoPointFieldMapper.Names.GEOHASH)); String geohash = queryBuilder.geohash(); if (queryBuilder.precision() != null) { int len = Math.min(queryBuilder.precision(), geohash.length()); diff --git a/core/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java index 0f7e2e67e38..beef2df15d4 100644 --- a/core/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java @@ -52,6 +52,7 @@ public class NestedQueryBuilderTests extends AbstractQueryTestCase { IndexStats indexStats = client().admin().indices().prepareStats("test").clear().get().getIndex("test"); assertNotNull(indexStats.getShards()[0].getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); - indicesService.indexService("test").getShardOrNull(0).checkIdle(0); + indicesService.indexService(resolveIndex("test")).getShardOrNull(0).checkIdle(0); } ); IndexStats indexStats = client().admin().indices().prepareStats("test").get().getIndex("test"); @@ -345,7 +345,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { ensureGreen(); client().prepareIndex("test", "bar", "1").setSource("{}").get(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); setDurability(shard, Translog.Durability.REQUEST); assertFalse(shard.getEngine().getTranslog().syncNeeded()); @@ -385,7 +385,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { client().prepareIndex("test", "test").setSource("{}").get(); ensureGreen("test"); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexShard test = indicesService.indexService("test").getShardOrNull(0); + IndexShard test = indicesService.indexService(resolveIndex("test")).getShardOrNull(0); assertEquals(versionCreated.luceneVersion, test.minimumCompatibleVersion()); client().prepareIndex("test", "test").setSource("{}").get(); assertEquals(versionCreated.luceneVersion, test.minimumCompatibleVersion()); @@ -396,7 +396,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { public void testUpdatePriority() { assertAcked(client().admin().indices().prepareCreate("test") .setSettings(IndexMetaData.SETTING_PRIORITY, 200)); - IndexService indexService = getInstanceFromNode(IndicesService.class).indexService("test"); + IndexService indexService = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); assertEquals(200, indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue()); client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_PRIORITY, 400).build()).get(); assertEquals(400, indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue()); @@ -410,7 +410,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { SearchResponse response = client().prepareSearch("test").get(); assertHitCount(response, 1L); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); ShardPath shardPath = shard.shardPath(); Path dataPath = 
shardPath.getDataPath(); @@ -530,7 +530,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { createIndex("test"); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); ShardStats stats = new ShardStats(shard.routingEntry(), shard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), shard, new CommonStatsFlags()), shard.commitStats()); assertEquals(shard.shardPath().getRootDataPath().toString(), stats.getDataPath()); @@ -570,7 +570,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { ensureGreen(); client().prepareIndex("test_iol", "test", "0").setSource("{\"foo\" : \"bar\"}").setRefresh(true).get(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test_iol"); + IndexService test = indicesService.indexService(resolveIndex("test_iol")); IndexShard shard = test.getShardOrNull(0); AtomicInteger preIndex = new AtomicInteger(); AtomicInteger postIndex = new AtomicInteger(); @@ -669,7 +669,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { createIndex("test", settingsBuilder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.REQUEST).build()); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); assertFalse(shard.shouldFlush()); client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(133 /* size of the operation + header&footer*/, ByteSizeUnit.BYTES)).build()).get(); @@ -703,7 +703,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { createIndex("test"); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); final IndexShard shard = test.getShardOrNull(0); assertFalse(shard.shouldFlush()); client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(133/* size of the operation + header&footer*/, ByteSizeUnit.BYTES)).build()).get(); @@ -749,7 +749,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { ).get()); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); final IndexShard shard = test.getShardOrNull(0); CountDownLatch latch = new CountDownLatch(1); Thread recoveryThread = new Thread(() -> { @@ -779,7 +779,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { ).get()); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); final IndexShard shard = test.getShardOrNull(0); final int numThreads = randomIntBetween(2, 4); Thread[] indexThreads = new 
Thread[numThreads]; @@ -830,7 +830,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { createIndex("test"); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); final IndexShard shard = test.getShardOrNull(0); int translogOps = 1; client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(randomBoolean()).get(); @@ -861,7 +861,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { createIndex("test"); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); final IndexShard shard = test.getShardOrNull(0); client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(randomBoolean()).get(); if (randomBoolean()) { @@ -892,7 +892,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); DiscoveryNode localNode = new DiscoveryNode("foo", DummyTransportAddress.INSTANCE, Version.CURRENT); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); final IndexShard shard = test.getShardOrNull(0); client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(randomBoolean()).get(); @@ -945,7 +945,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { createIndex("test"); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); final IndexShard shard = test.getShardOrNull(0); ShardRouting origRouting = shard.routingEntry(); assertThat(shard.state(), equalTo(IndexShardState.STARTED)); @@ -967,8 +967,8 @@ public class IndexShardTests extends ESSingleNodeTestCase { createIndex("test_target"); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); - IndexService test_target = indicesService.indexService("test_target"); + IndexService test = indicesService.indexService(resolveIndex("test")); + IndexService test_target = indicesService.indexService(resolveIndex("test_target")); final IndexShard test_shard = test.getShardOrNull(0); client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(randomBoolean()).get(); @@ -1029,7 +1029,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { createIndex("test"); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService indexService = indicesService.indexService("test"); + IndexService indexService = indicesService.indexService(resolveIndex("test")); IndexShard shard = indexService.getShardOrNull(0); client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}").setRefresh(true).get(); client().prepareIndex("test", "test", "1").setSource("{\"foobar\" : \"bar\"}").setRefresh(true).get(); @@ -1078,7 +1078,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { createIndex("test"); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService indexService = indicesService.indexService("test"); + IndexService indexService = 
indicesService.indexService(resolveIndex("test")); IndexShard shard = indexService.getShardOrNull(0); client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}").setRefresh(true).get(); client().prepareIndex("test", "test", "1").setSource("{\"foobar\" : \"bar\"}").setRefresh(true).get(); @@ -1126,7 +1126,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { createIndex("test"); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService indexService = indicesService.indexService("test"); + IndexService indexService = indicesService.indexService(resolveIndex("test")); IndexShard shard = indexService.getShardOrNull(0); client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}").setRefresh(true).get(); IndexSearcherWrapper wrapper = new IndexSearcherWrapper() { @@ -1179,7 +1179,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { .endObject().endObject().endObject()).get(); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("testindexfortranslogsync"); + IndexService test = indicesService.indexService(resolveIndex("testindexfortranslogsync")); IndexShard shard = test.getShardOrNull(0); ShardRouting routing = new ShardRouting(shard.routingEntry()); test.removeShard(0, "b/c britta says so"); @@ -1206,7 +1206,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { .endObject().endObject().endObject()).get(); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("index"); + IndexService test = indicesService.indexService(resolveIndex("index")); IndexShard shard = test.getShardOrNull(0); ShardRouting routing = new ShardRouting(shard.routingEntry()); test.removeShard(0, "b/c britta says so"); @@ -1235,7 +1235,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { .endObject().endObject().endObject()).get(); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("index"); + IndexService test = indicesService.indexService(resolveIndex("index")); IndexShard shard = test.getShardOrNull(0); ShardRouting routing = new ShardRouting(shard.routingEntry()); test.removeShard(0, "b/c britta says so"); diff --git a/core/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java b/core/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java new file mode 100644 index 00000000000..edb337fd4e6 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java @@ -0,0 +1,62 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.index.similarity; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.IndexSettingsModule; + +import java.util.Collections; + +public class SimilarityServiceTests extends ESTestCase { + + // Tests #16594 + public void testOverrideBuiltInSimilarity() { + Settings settings = Settings.builder().put("index.similarity.BM25.type", "classic").build(); + IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", settings); + try { + new SimilarityService(indexSettings, Collections.emptyMap()); + fail("can't override bm25"); + } catch (IllegalArgumentException ex) { + assertEquals(ex.getMessage(), "Cannot redefine built-in Similarity [BM25]"); + } + } + + // Pre v3 indices could override built-in similarities + public void testOverrideBuiltInSimilarityPreV3() { + Settings settings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0) + .put("index.similarity.BM25.type", "classic") + .build(); + IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", settings); + SimilarityService service = new SimilarityService(indexSettings, Collections.emptyMap()); + assertTrue(service.getSimilarity("BM25") instanceof ClassicSimilarityProvider); + } + + // Tests #16594 + public void testDefaultSimilarity() { + Settings settings = Settings.builder().put("index.similarity.default.type", "BM25").build(); + IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", settings); + SimilarityService service = new SimilarityService(indexSettings, Collections.emptyMap()); + assertTrue(service.getDefaultSimilarity() instanceof BM25SimilarityProvider); + } +} diff --git a/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java b/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java index afb9673508a..4f08c497443 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java +++ b/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java @@ -161,7 +161,7 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase { public void testShardAdditionAndRemoval() { createIndex("test", Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 3).put(SETTING_NUMBER_OF_REPLICAS, 0).build()); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); MockController controller = new MockController(Settings.builder() .put(IndexingMemoryController.INDEX_BUFFER_SIZE_SETTING, "4mb").build()); @@ -194,7 +194,7 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase { createIndex("test", Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 2).put(SETTING_NUMBER_OF_REPLICAS, 0).build()); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); MockController controller = new MockController(Settings.builder() .put(IndexingMemoryController.INDEX_BUFFER_SIZE_SETTING, "5mb") @@ -248,7 +248,7 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase { public void testThrottling() throws Exception 
{ createIndex("test", Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 3).put(SETTING_NUMBER_OF_REPLICAS, 0).build()); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); MockController controller = new MockController(Settings.builder() .put(IndexingMemoryController.INDEX_BUFFER_SIZE_SETTING, "4mb").build()); @@ -316,7 +316,7 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase { ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService indexService = indicesService.indexService("index"); + IndexService indexService = indicesService.indexService(resolveIndex("index")); IndexShard shard = indexService.getShardOrNull(0); assertNotNull(shard); @@ -342,7 +342,7 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase { @Override protected long getIndexBufferRAMBytesUsed(IndexShard shard) { return shard.getIndexBufferRAMBytesUsed(); - } + } @Override protected void writeIndexingBufferAsync(IndexShard shard) { diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java b/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java index e34e1d6bd6b..367f4cd46ce 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java @@ -49,8 +49,9 @@ public class IndicesLifecycleListenerSingleNodeTests extends ESSingleNodeTestCas assertAcked(client().admin().indices().prepareCreate("test") .setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0)); ensureGreen(); - IndexMetaData metaData = indicesService.indexService("test").getMetaData(); - ShardRouting shardRouting = indicesService.indexService("test").getShard(0).routingEntry(); + Index idx = resolveIndex("test"); + IndexMetaData metaData = indicesService.indexService(idx).getMetaData(); + ShardRouting shardRouting = indicesService.indexService(idx).getShard(0).routingEntry(); final AtomicInteger counter = new AtomicInteger(1); IndexEventListener countingListener = new IndexEventListener() { @Override @@ -89,10 +90,11 @@ public class IndicesLifecycleListenerSingleNodeTests extends ESSingleNodeTestCas counter.incrementAndGet(); } }; - indicesService.deleteIndex("test", "simon says"); + indicesService.deleteIndex(idx, "simon says"); try { NodeServicesProvider nodeServicesProvider = getInstanceFromNode(NodeServicesProvider.class); IndexService index = indicesService.createIndex(nodeServicesProvider, metaData, Arrays.asList(countingListener)); + idx = index.index(); ShardRouting newRouting = new ShardRouting(shardRouting); String nodeId = newRouting.currentNodeId(); ShardRoutingHelper.moveToUnassigned(newRouting, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "boom")); @@ -106,7 +108,7 @@ public class IndicesLifecycleListenerSingleNodeTests extends ESSingleNodeTestCas ShardRoutingHelper.moveToStarted(newRouting); shard.updateRoutingEntry(newRouting, true); } finally { - indicesService.deleteIndex("test", "simon says"); + indicesService.deleteIndex(idx, "simon says"); } assertEquals(7, counter.get()); } diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java b/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java index e9f1f6be518..57a7f34e4b7 100644 
--- a/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java @@ -73,12 +73,14 @@ public class IndicesServiceTests extends ESSingleNodeTestCase { IndexMetaData meta = IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas( 1).build(); IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", meta.getSettings()); - assertFalse("no shard location", indicesService.canDeleteShardContent(new ShardId("test", "_na_", 0), indexSettings)); + ShardId shardId = new ShardId(meta.getIndex(), 0); + assertFalse("no shard location", indicesService.canDeleteShardContent(shardId, indexSettings)); IndexService test = createIndex("test"); + shardId = new ShardId(test.index(), 0); assertTrue(test.hasShard(0)); - assertFalse("shard is allocated", indicesService.canDeleteShardContent(new ShardId("test", "_na_", 0), indexSettings)); + assertFalse("shard is allocated", indicesService.canDeleteShardContent(shardId, test.getIndexSettings())); test.removeShard(0, "boom"); - assertTrue("shard is removed", indicesService.canDeleteShardContent(new ShardId("test", "_na_", 0), indexSettings)); + assertTrue("shard is removed", indicesService.canDeleteShardContent(shardId, test.getIndexSettings())); } public void testDeleteIndexStore() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java b/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java index 239cb7a9096..936e8ac600a 100644 --- a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java +++ b/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java @@ -42,7 +42,7 @@ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase { public void testModificationPreventsFlushing() throws InterruptedException { createIndex("test"); client().prepareIndex("test", "test", "1").setSource("{}").get(); - IndexService test = getInstanceFromNode(IndicesService.class).indexService("test"); + IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); @@ -86,7 +86,7 @@ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase { public void testSingleShardSuccess() throws InterruptedException { createIndex("test"); client().prepareIndex("test", "test", "1").setSource("{}").get(); - IndexService test = getInstanceFromNode(IndicesService.class).indexService("test"); + IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); @@ -106,7 +106,7 @@ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase { public void testSyncFailsIfOperationIsInFlight() throws InterruptedException { createIndex("test"); client().prepareIndex("test", "test", "1").setSource("{}").get(); - IndexService test = getInstanceFromNode(IndicesService.class).indexService("test"); + IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); @@ -126,7 +126,7 @@ public class SyncedFlushSingleNodeTests extends 
ESSingleNodeTestCase { public void testSyncFailsOnIndexClosedOrMissing() throws InterruptedException { createIndex("test"); - IndexService test = getInstanceFromNode(IndicesService.class).indexService("test"); + IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); @@ -159,7 +159,7 @@ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase { public void testFailAfterIntermediateCommit() throws InterruptedException { createIndex("test"); client().prepareIndex("test", "test", "1").setSource("{}").get(); - IndexService test = getInstanceFromNode(IndicesService.class).indexService("test"); + IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); @@ -192,7 +192,7 @@ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase { public void testFailWhenCommitIsMissing() throws InterruptedException { createIndex("test"); client().prepareIndex("test", "test", "1").setSource("{}").get(); - IndexService test = getInstanceFromNode(IndicesService.class).indexService("test"); + IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index 155032f1d8c..98d4f84c6ef 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -37,6 +37,7 @@ import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationComman import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.index.Index; import org.elasticsearch.index.recovery.RecoveryStats; import org.elasticsearch.index.store.Store; import org.elasticsearch.indices.IndicesService; @@ -261,14 +262,16 @@ public class IndexRecoveryIT extends ESIntegTestCase { .execute().actionGet().getState(); logger.info("--> waiting for recovery to start both on source and target"); + final Index index = resolveIndex(INDEX_NAME); assertBusy(new Runnable() { @Override public void run() { + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, nodeA); - assertThat(indicesService.indexServiceSafe(INDEX_NAME).getShard(0).recoveryStats().currentAsSource(), + assertThat(indicesService.indexServiceSafe(index).getShard(0).recoveryStats().currentAsSource(), equalTo(1)); indicesService = internalCluster().getInstance(IndicesService.class, nodeB); - assertThat(indicesService.indexServiceSafe(INDEX_NAME).getShard(0).recoveryStats().currentAsTarget(), + assertThat(indicesService.indexServiceSafe(index).getShard(0).recoveryStats().currentAsTarget(), equalTo(1)); } }); diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index b5f744ddc23..b69d1218546 100644 --- 
a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -94,7 +94,7 @@ public class RecoverySourceHandlerTests extends ESTestCase { @Override public void close() throws IOException { super.close(); - store.directory().sync(Collections.singleton(md.name())); // sync otherwise MDW will mess with it + targetStore.directory().sync(Collections.singleton(md.name())); // sync otherwise MDW will mess with it } }; } catch (IOException e) { diff --git a/core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java b/core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java index 78d5e2203f5..d85849570cf 100644 --- a/core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java +++ b/core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java @@ -65,7 +65,7 @@ public class UpdateSettingsIT extends ESIntegTestCase { IndexMetaData indexMetaData = client().admin().cluster().prepareState().execute().actionGet().getState().metaData().index("test"); assertEquals(indexMetaData.getSettings().get("index.refresh_interval"), "-1"); for (IndicesService service : internalCluster().getInstances(IndicesService.class)) { - IndexService indexService = service.indexService("test"); + IndexService indexService = service.indexService(resolveIndex("test")); if (indexService != null) { assertEquals(indexService.getIndexSettings().getRefreshInterval().millis(), -1); assertEquals(indexService.getIndexSettings().getFlushThresholdSize().bytes(), 1024); @@ -79,7 +79,7 @@ public class UpdateSettingsIT extends ESIntegTestCase { indexMetaData = client().admin().cluster().prepareState().execute().actionGet().getState().metaData().index("test"); assertNull(indexMetaData.getSettings().get("index.refresh_interval")); for (IndicesService service : internalCluster().getInstances(IndicesService.class)) { - IndexService indexService = service.indexService("test"); + IndexService indexService = service.indexService(resolveIndex("test")); if (indexService != null) { assertEquals(indexService.getIndexSettings().getRefreshInterval().millis(), 1000); assertEquals(indexService.getIndexSettings().getFlushThresholdSize().bytes(), 1024); diff --git a/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java b/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java index 8a9fa191854..35624085c94 100644 --- a/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java +++ b/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java @@ -47,6 +47,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.gateway.GatewayAllocator; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperService; @@ -376,12 +377,13 @@ public class RareClusterStateIT extends ESIntegTestCase { putMappingResponse.set(e); } }); + final Index index = resolveIndex("index"); // Wait for mappings to be available on master assertBusy(new Runnable() { @Override public void run() { final IndicesService indicesService = internalCluster().getInstance(IndicesService.class, master); - final IndexService indexService = indicesService.indexServiceSafe("index"); + final IndexService 
indexService = indicesService.indexServiceSafe(index); assertNotNull(indexService); final MapperService mapperService = indexService.mapperService(); DocumentMapper mapper = mapperService.documentMapper("type"); diff --git a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java index 948c005bf33..b1f94f203e4 100644 --- a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java @@ -336,10 +336,11 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { // we have to do this in two steps as we now do async shard fetching before assigning, so the change to the // allocation filtering may not have immediate effect // TODO: we should add an easier to do this. It's too much of a song and dance.. + Index index = resolveIndex("test"); assertBusy(new Runnable() { @Override public void run() { - assertTrue(internalCluster().getInstance(IndicesService.class, node4).hasIndex("test")); + assertTrue(internalCluster().getInstance(IndicesService.class, node4).hasIndex(index)); } }); diff --git a/core/src/test/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java b/core/src/test/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java index abfe18f8c58..a415b0992a7 100644 --- a/core/src/test/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java +++ b/core/src/test/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java @@ -67,9 +67,11 @@ public class IngestProcessorNotInstalledOnAllNodesIT extends ESIntegTestCase { public void testFailPipelineCreation() throws Exception { installPlugin = true; - internalCluster().startNode(); + String node1 = internalCluster().startNode(); installPlugin = false; - internalCluster().startNode(); + String node2 = internalCluster().startNode(); + ensureStableCluster(2, node1); + ensureStableCluster(2, node2); try { client().admin().cluster().preparePutPipeline("_id", pipelineSource).get(); diff --git a/core/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java b/core/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java index a47217e3048..4b514763f72 100644 --- a/core/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java +++ b/core/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java @@ -165,7 +165,7 @@ public class RecoveriesCollectionTests extends ESSingleNodeTestCase { long startRecovery(RecoveriesCollection collection, RecoveryTargetService.RecoveryListener listener, TimeValue timeValue) { IndicesService indexServices = getInstanceFromNode(IndicesService.class); - IndexShard indexShard = indexServices.indexServiceSafe("test").getShardOrNull(0); + IndexShard indexShard = indexServices.indexServiceSafe(resolveIndex("test")).getShardOrNull(0); final DiscoveryNode sourceNode = new DiscoveryNode("id", DummyTransportAddress.INSTANCE, Version.CURRENT); return collection.startRecovery(indexShard, sourceNode, listener, timeValue); } diff --git a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java index 0825da4d4df..a369b44e2b1 100644 --- a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java +++ b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java @@ -122,26 +122,21 @@ public class 
ScriptServiceTests extends ESTestCase { } public void testScriptsWithoutExtensions() throws IOException { - buildScriptService(Settings.EMPTY); - logger.info("--> setup two test files one with extension and another without"); Path testFileNoExt = scriptsFilePath.resolve("test_no_ext"); Path testFileWithExt = scriptsFilePath.resolve("test_script.tst"); Streams.copy("test_file_no_ext".getBytes("UTF-8"), Files.newOutputStream(testFileNoExt)); Streams.copy("test_file".getBytes("UTF-8"), Files.newOutputStream(testFileWithExt)); resourceWatcherService.notifyNow(); - logger.info("--> verify that file with extension was correctly processed"); CompiledScript compiledScript = scriptService.compile(new Script("test_script", ScriptType.FILE, "test", null), ScriptContext.Standard.SEARCH, Collections.emptyMap()); assertThat(compiledScript.compiled(), equalTo((Object) "compiled_test_file")); - logger.info("--> delete both files"); Files.delete(testFileNoExt); Files.delete(testFileWithExt); resourceWatcherService.notifyNow(); - logger.info("--> verify that file with extension was correctly removed"); try { scriptService.compile(new Script("test_script", ScriptType.FILE, "test", null), ScriptContext.Standard.SEARCH, Collections.emptyMap()); @@ -151,6 +146,25 @@ public class ScriptServiceTests extends ESTestCase { } } + public void testScriptCompiledOnceHiddenFileDetected() throws IOException { + buildScriptService(Settings.EMPTY); + + Path testHiddenFile = scriptsFilePath.resolve(".hidden_file"); + Streams.copy("test_hidden_file".getBytes("UTF-8"), Files.newOutputStream(testHiddenFile)); + + Path testFileScript = scriptsFilePath.resolve("file_script.tst"); + Streams.copy("test_file_script".getBytes("UTF-8"), Files.newOutputStream(testFileScript)); + resourceWatcherService.notifyNow(); + + CompiledScript compiledScript = scriptService.compile(new Script("file_script", ScriptType.FILE, "test", null), + ScriptContext.Standard.SEARCH, Collections.emptyMap()); + assertThat(compiledScript.compiled(), equalTo((Object) "compiled_test_file_script")); + + Files.delete(testHiddenFile); + Files.delete(testFileScript); + resourceWatcherService.notifyNow(); + } + public void testInlineScriptCompiledOnceCache() throws IOException { buildScriptService(Settings.EMPTY); CompiledScript compiledScript1 = scriptService.compile(new Script("1+1", ScriptType.INLINE, "test", null), diff --git a/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java b/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java index 52f9c59e744..35a7cb3c7e1 100644 --- a/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java @@ -546,7 +546,7 @@ public class SearchSourceBuilderTests extends ESTestCase { SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.parseSearchSource(parser, createParseContext(parser), aggParsers, suggesters); assertEquals(1, searchSourceBuilder.sorts().size()); - assertEquals("{\"foo\":{}}", searchSourceBuilder.sorts().get(0).toUtf8()); + assertEquals("{\"foo\":{\"order\":\"asc\"}}", searchSourceBuilder.sorts().get(0).toUtf8()); } } diff --git a/core/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingIT.java b/core/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingIT.java index 0c7c069ec34..8afbdca8c2e 100644 --- a/core/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingIT.java +++ 
b/core/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingIT.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.DocumentMapper; @@ -143,6 +144,7 @@ public class ParentFieldLoadingIT extends ESIntegTestCase { .setUpdateAllTypes(true) .get(); assertAcked(putMappingResponse); + Index test = resolveIndex("test"); assertBusy(new Runnable() { @Override public void run() { @@ -152,7 +154,7 @@ public class ParentFieldLoadingIT extends ESIntegTestCase { boolean verified = false; IndicesService indicesService = internalCluster().getInstance(IndicesService.class, nodeName); - IndexService indexService = indicesService.indexService("test"); + IndexService indexService = indicesService.indexService(test); if (indexService != null) { MapperService mapperService = indexService.mapperService(); DocumentMapper documentMapper = mapperService.documentMapper("child"); diff --git a/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java b/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java index e0bc26c9296..23e2592447b 100644 --- a/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java +++ b/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java @@ -567,7 +567,7 @@ public class MultiMatchQueryIT extends ESIntegTestCase { // test if boosts work searchResponse = client().prepareSearch("test") - .setQuery(randomizeType(multiMatchQuery("the ultimate", "full_name", "first_name", "last_name", "category").field("last_name", 2) + .setQuery(randomizeType(multiMatchQuery("the ultimate", "full_name", "first_name", "last_name", "category").field("last_name", 10) .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) .operator(Operator.AND))).get(); assertFirstHit(searchResponse, hasId("ultimate1")); // has ultimate in the last_name and that is boosted diff --git a/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java b/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java index dfea1a9316b..dc61f0ef34c 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java +++ b/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java @@ -43,7 +43,7 @@ import java.io.IOException; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; -public abstract class AbstractSortTestCase & ToXContent & SortElementParserTemp> extends ESTestCase { +public abstract class AbstractSortTestCase & SortElementParserTemp> extends ESTestCase { protected static NamedWriteableRegistry namedWriteableRegistry; @@ -53,7 +53,8 @@ public abstract class AbstractSortTestCase & ToXCont @BeforeClass public static void init() { namedWriteableRegistry = new NamedWriteableRegistry(); - namedWriteableRegistry.registerPrototype(GeoDistanceSortBuilder.class, GeoDistanceSortBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(SortBuilder.class, GeoDistanceSortBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(SortBuilder.class, ScoreSortBuilder.PROTOTYPE); indicesQueriesRegistry = new SearchModule(Settings.EMPTY, namedWriteableRegistry).buildQueryParserRegistry(); } @@ -85,9 +86,9 @@ public abstract 
class AbstractSortTestCase & ToXCont XContentParser itemParser = XContentHelper.createParser(builder.bytes()); itemParser.nextToken(); - + /* - * filter out name of sort, or field name to sort on for element fieldSort + * filter out name of sort, or field name to sort on for element fieldSort */ itemParser.nextToken(); String elementName = itemParser.currentName(); @@ -95,7 +96,7 @@ public abstract class AbstractSortTestCase & ToXCont QueryParseContext context = new QueryParseContext(indicesQueriesRegistry); context.reset(itemParser); - NamedWriteable parsedItem = testItem.fromXContent(context, elementName); + SortBuilder parsedItem = testItem.fromXContent(context, elementName); assertNotSame(testItem, parsedItem); assertEquals(testItem, parsedItem); assertEquals(testItem.hashCode(), parsedItem.hashCode()); @@ -146,17 +147,15 @@ public abstract class AbstractSortTestCase & ToXCont } } + @SuppressWarnings("unchecked") protected T copyItem(T original) throws IOException { try (BytesStreamOutput output = new BytesStreamOutput()) { original.writeTo(output); try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(output.bytes()), namedWriteableRegistry)) { - @SuppressWarnings("unchecked") - T prototype = (T) namedWriteableRegistry.getPrototype(getPrototype(), original.getWriteableName()); - T copy = (T) prototype.readFrom(in); - return copy; + T prototype = (T) namedWriteableRegistry.getPrototype(SortBuilder.class, + original.getWriteableName()); + return prototype.readFrom(in); } } } - - protected abstract Class getPrototype(); } diff --git a/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java b/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java index e957db58b38..611053b14d5 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java @@ -60,7 +60,7 @@ public class GeoDistanceSortBuilderTests extends AbstractSortTestCase getPrototype() { - return (Class) GeoDistanceSortBuilder.PROTOTYPE.getClass(); - } - public void testSortModeSumIsRejectedInSetter() { GeoDistanceSortBuilder builder = new GeoDistanceSortBuilder("testname", -1, -1); GeoPoint point = RandomGeoGenerator.randomPoint(getRandom()); @@ -189,23 +183,23 @@ public class GeoDistanceSortBuilderTests extends AbstractSortTestCase { + + @Override + protected ScoreSortBuilder createTestItem() { + return new ScoreSortBuilder().order(randomBoolean() ? 
SortOrder.ASC : SortOrder.DESC); + } + + @Override + protected ScoreSortBuilder mutate(ScoreSortBuilder original) throws IOException { + ScoreSortBuilder result = new ScoreSortBuilder(); + if (original.order() == SortOrder.ASC) { + result.order(SortOrder.DESC); + } else { + result.order(SortOrder.ASC); + } + return result; + } + + @Rule + public ExpectedException exceptionRule = ExpectedException.none(); + + /** + * test passing null to {@link ScoreSortBuilder#order(SortOrder)} is illegal + */ + public void testIllegalOrder() { + exceptionRule.expect(NullPointerException.class); + exceptionRule.expectMessage("sort order cannot be null."); + new ScoreSortBuilder().order(null); + } + + /** + * test parsing order parameter if specified as `order` field in the json + * instead of the `reverse` field that we render in toXContent + */ + public void testParseOrder() throws IOException { + QueryParseContext context = new QueryParseContext(indicesQueriesRegistry); + context.parseFieldMatcher(new ParseFieldMatcher(Settings.EMPTY)); + SortOrder order = randomBoolean() ? SortOrder.ASC : SortOrder.DESC; + String scoreSortString = "{ \"_score\": { \"order\": \""+ order.toString() +"\" }}"; + XContentParser parser = XContentFactory.xContent(scoreSortString).createParser(scoreSortString); + // need to skip until parser is located on second START_OBJECT + parser.nextToken(); + parser.nextToken(); + parser.nextToken(); + + context.reset(parser); + ScoreSortBuilder scoreSort = ScoreSortBuilder.PROTOTYPE.fromXContent(context, "_score"); + assertEquals(order, scoreSort.order()); + } +} diff --git a/core/src/test/java/org/elasticsearch/search/sort/SortParserTests.java b/core/src/test/java/org/elasticsearch/search/sort/SortParserTests.java index cbd7b5468b2..0c64b7e7b15 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/SortParserTests.java +++ b/core/src/test/java/org/elasticsearch/search/sort/SortParserTests.java @@ -50,7 +50,7 @@ public class SortParserTests extends ESSingleNodeTestCase { XContentParser parser = XContentHelper.createParser(sortBuilder.bytes()); parser.nextToken(); GeoDistanceSortParser geoParser = new GeoDistanceSortParser(); - geoParser.parse(parser, context); + geoParser.parse(parser, context.getQueryShardContext()); sortBuilder = jsonBuilder(); sortBuilder.startObject(); @@ -139,6 +139,6 @@ public class SortParserTests extends ESSingleNodeTestCase { XContentParser parser = XContentHelper.createParser(sortBuilder.bytes()); parser.nextToken(); GeoDistanceSortParser geoParser = new GeoDistanceSortParser(); - geoParser.parse(parser, context); + geoParser.parse(parser, context.getQueryShardContext()); } } diff --git a/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index 7e9bd14f9f3..dc803a46412 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -137,6 +137,32 @@ public abstract class AbstractSnapshotIntegTestCase extends ESIntegTestCase { return null; } + public static void blockAllDataNodes(String repository) { + for(RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) { + ((MockRepository)repositoriesService.repository(repository)).blockOnDataFiles(true); + } + } + + public static void unblockAllDataNodes(String repository) { + for(RepositoriesService repositoriesService : 
internalCluster().getDataNodeInstances(RepositoriesService.class)) { + ((MockRepository)repositoriesService.repository(repository)).unblock(); + } + } + + public void waitForBlockOnAnyDataNode(String repository, TimeValue timeout) throws InterruptedException { + if (false == awaitBusy(() -> { + for(RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) { + MockRepository mockRepository = (MockRepository) repositoriesService.repository(repository); + if (mockRepository.blocked()) { + return true; + } + } + return false; + }, timeout.millis(), TimeUnit.MILLISECONDS)) { + fail("Timeout waiting for repository block on any data node!!!"); + } + } + public static void unblockNode(String node) { ((MockRepository)internalCluster().getInstance(RepositoriesService.class, node).repository("test-repo")).unblock(); } diff --git a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 65337d4b632..9fb2b0f9989 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -1865,6 +1865,66 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas } } + public void testCloseIndexDuringRestore() throws Exception { + Client client = client(); + + logger.info("--> creating repository"); + assertAcked(client.admin().cluster().preparePutRepository("test-repo") + .setType("mock").setSettings(Settings.settingsBuilder() + .put("location", randomRepoPath()) + .put("compress", randomBoolean()) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + )); + + createIndex("test-idx-1", "test-idx-2"); + ensureGreen(); + + logger.info("--> indexing some data"); + for (int i = 0; i < 100; i++) { + index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i); + index("test-idx-2", "doc", Integer.toString(i), "foo", "baz" + i); + } + refresh(); + assertThat(client.prepareSearch("test-idx-1").setSize(0).get().getHits().totalHits(), equalTo(100L)); + assertThat(client.prepareSearch("test-idx-2").setSize(0).get().getHits().totalHits(), equalTo(100L)); + + logger.info("--> snapshot"); + assertThat(client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") + .setIndices("test-idx-*").setWaitForCompletion(true).get().getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); + + logger.info("--> deleting indices before restoring"); + assertAcked(client.admin().indices().prepareDelete("test-idx-*").get()); + + blockAllDataNodes("test-repo"); + logger.info("--> execution will be blocked on all data nodes"); + + logger.info("--> start restore"); + ListenableActionFuture restoreFut = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true) + .execute(); + + logger.info("--> waiting for block to kick in"); + waitForBlockOnAnyDataNode("test-repo", TimeValue.timeValueSeconds(60)); + + logger.info("--> close index while restore is running"); + try { + client.admin().indices().prepareClose("test-idx-1").get(); + fail("Expected closing index to fail during restore"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("Cannot close indices that are being restored: [test-idx-1]")); + } + + logger.info("--> unblocking all data nodes"); + unblockAllDataNodes("test-repo"); + + logger.info("--> wait for restore to 
finish"); + RestoreSnapshotResponse restoreSnapshotResponse = restoreFut.get(); + logger.info("--> check that all shards were recovered"); + assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); + assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), greaterThan(0)); + assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); + } + public void testDeleteOrphanSnapshot() throws Exception { Client client = client(); diff --git a/core/src/test/java/org/elasticsearch/test/NoopDiscovery.java b/core/src/test/java/org/elasticsearch/test/NoopDiscovery.java index 427dce714e8..3193aaf458e 100644 --- a/core/src/test/java/org/elasticsearch/test/NoopDiscovery.java +++ b/core/src/test/java/org/elasticsearch/test/NoopDiscovery.java @@ -21,13 +21,11 @@ package org.elasticsearch.test; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RoutingService; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.component.LifecycleListener; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.DiscoveryStats; -import org.elasticsearch.node.service.NodeService; public class NoopDiscovery implements Discovery { @@ -42,11 +40,6 @@ public class NoopDiscovery implements Discovery { return null; } - @Override - public void setNodeService(@Nullable NodeService nodeService) { - - } - @Override public void setRoutingService(RoutingService routingService) { diff --git a/core/src/test/java/org/elasticsearch/tribe/TribeIT.java b/core/src/test/java/org/elasticsearch/tribe/TribeIT.java index 55a79ffddfc..7313d880a63 100644 --- a/core/src/test/java/org/elasticsearch/tribe/TribeIT.java +++ b/core/src/test/java/org/elasticsearch/tribe/TribeIT.java @@ -132,6 +132,9 @@ public class TribeIT extends ESIntegTestCase { Settings.Builder tribe1Defaults = Settings.builder(); Settings.Builder tribe2Defaults = Settings.builder(); for (Map.Entry entry : asMap.entrySet()) { + if (entry.getKey().startsWith("path.")) { + continue; + } tribe1Defaults.put("tribe.t1." + entry.getKey(), entry.getValue()); tribe2Defaults.put("tribe.t2." + entry.getKey(), entry.getValue()); } diff --git a/core/src/test/java/org/elasticsearch/tribe/TribeServiceTests.java b/core/src/test/java/org/elasticsearch/tribe/TribeServiceTests.java new file mode 100644 index 00000000000..5174a317a40 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/tribe/TribeServiceTests.java @@ -0,0 +1,96 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.tribe; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; + +public class TribeServiceTests extends ESTestCase { + public void testMinimalSettings() { + Settings globalSettings = Settings.builder() + .put("node.name", "nodename") + .put("path.home", "some/path").build(); + Settings clientSettings = TribeService.buildClientSettings("tribe1", globalSettings, Settings.EMPTY); + assertEquals("some/path", clientSettings.get("path.home")); + assertEquals("nodename/tribe1", clientSettings.get("node.name")); + assertEquals("tribe1", clientSettings.get("tribe.name")); + assertEquals("false", clientSettings.get("http.enabled")); + assertEquals("true", clientSettings.get("node.client")); + assertEquals(5, clientSettings.getAsMap().size()); + } + + public void testEnvironmentSettings() { + Settings globalSettings = Settings.builder() + .put("node.name", "nodename") + .put("path.home", "some/path") + .put("path.conf", "conf/path") + .put("path.plugins", "plugins/path") + .put("path.scripts", "scripts/path") + .put("path.logs", "logs/path").build(); + Settings clientSettings = TribeService.buildClientSettings("tribe1", globalSettings, Settings.EMPTY); + assertEquals("some/path", clientSettings.get("path.home")); + assertEquals("conf/path", clientSettings.get("path.conf")); + assertEquals("plugins/path", clientSettings.get("path.plugins")); + assertEquals("scripts/path", clientSettings.get("path.scripts")); + assertEquals("logs/path", clientSettings.get("path.logs")); + + Settings tribeSettings = Settings.builder() + .put("path.home", "alternate/path").build(); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { + TribeService.buildClientSettings("tribe1", globalSettings, tribeSettings); + }); + assertTrue(e.getMessage(), e.getMessage().contains("Setting [path.home] not allowed in tribe client")); + } + + public void testPassthroughSettings() { + Settings globalSettings = Settings.builder() + .put("node.name", "nodename") + .put("path.home", "some/path") + .put("network.host", "0.0.0.0") + .put("network.bind_host", "1.1.1.1") + .put("network.publish_host", "2.2.2.2") + .put("transport.host", "3.3.3.3") + .put("transport.bind_host", "4.4.4.4") + .put("transport.publish_host", "5.5.5.5").build(); + Settings clientSettings = TribeService.buildClientSettings("tribe1", globalSettings, Settings.EMPTY); + assertEquals("0.0.0.0", clientSettings.get("network.host")); + assertEquals("1.1.1.1", clientSettings.get("network.bind_host")); + assertEquals("2.2.2.2", clientSettings.get("network.publish_host")); + assertEquals("3.3.3.3", clientSettings.get("transport.host")); + assertEquals("4.4.4.4", clientSettings.get("transport.bind_host")); + assertEquals("5.5.5.5", clientSettings.get("transport.publish_host")); + + // per tribe client overrides still work + Settings tribeSettings = Settings.builder() + .put("network.host", "3.3.3.3") + .put("network.bind_host", "4.4.4.4") + .put("network.publish_host", "5.5.5.5") + .put("transport.host", "6.6.6.6") + .put("transport.bind_host", "7.7.7.7") + .put("transport.publish_host", "8.8.8.8").build(); + clientSettings = TribeService.buildClientSettings("tribe1", globalSettings, tribeSettings); + assertEquals("3.3.3.3", clientSettings.get("network.host")); + assertEquals("4.4.4.4", clientSettings.get("network.bind_host")); + assertEquals("5.5.5.5", clientSettings.get("network.publish_host")); + assertEquals("6.6.6.6", clientSettings.get("transport.host")); + 
assertEquals("7.7.7.7", clientSettings.get("transport.bind_host")); + assertEquals("8.8.8.8", clientSettings.get("transport.publish_host")); + } +} diff --git a/docs/reference/indices/clearcache.asciidoc b/docs/reference/indices/clearcache.asciidoc index 21008e5b46b..8ebb9e3488a 100644 --- a/docs/reference/indices/clearcache.asciidoc +++ b/docs/reference/indices/clearcache.asciidoc @@ -2,7 +2,7 @@ == Clear Cache The clear cache API allows to clear either all caches or specific cached -associated with one ore more indices. +associated with one or more indices. [source,js] -------------------------------------------------- diff --git a/docs/reference/mapping/params/coerce.asciidoc b/docs/reference/mapping/params/coerce.asciidoc index c9491607a6b..0121c307230 100644 --- a/docs/reference/mapping/params/coerce.asciidoc +++ b/docs/reference/mapping/params/coerce.asciidoc @@ -12,7 +12,6 @@ For instance: * Strings will be coerced to numbers. * Floating points will be truncated for integer values. -* Lon/lat geo-points will be normalized to a standard -180:180 / -90:90 coordinate system. For instance: diff --git a/docs/reference/mapping/types/geo-point.asciidoc b/docs/reference/mapping/types/geo-point.asciidoc index e724a5428b2..45c5e65addb 100644 --- a/docs/reference/mapping/types/geo-point.asciidoc +++ b/docs/reference/mapping/types/geo-point.asciidoc @@ -101,17 +101,6 @@ The following parameters are accepted by `geo_point` fields: [horizontal] -<>:: - - Normalize longitude and latitude values to a standard -180:180 / -90:90 - coordinate system. Accepts `true` and `false` (default). - -<>:: - - Should the field be stored on disk in a column-stride fashion, so that it - can later be used for sorting, aggregations, or scripting? Accepts `true` - (default) or `false`. - <>:: Should the geo-point also be indexed as a geohash in the `.geohash` diff --git a/docs/reference/migration/migrate_2_2.asciidoc b/docs/reference/migration/migrate_2_2.asciidoc index 39c059e7f47..9611d86a2ac 100644 --- a/docs/reference/migration/migrate_2_2.asciidoc +++ b/docs/reference/migration/migrate_2_2.asciidoc @@ -4,6 +4,16 @@ This section discusses the changes that you need to be aware of when migrating your application to Elasticsearch 2.2. +[[float]] +=== Mapping APIs + +==== Geo Point Type + +The `geo_point` format has been changed to reduce index size and the time required to both index and query +geo point data. To make these performance improvements possible both `doc_values` are `coerce` are required +and therefore cannot be changed. For this reason the `doc_values` and `coerce` parameters have been removed +from the <> field mapping. + [float] === Scripting and security diff --git a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc index c52bcb93e7d..90ae7367197 100644 --- a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc +++ b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc @@ -52,9 +52,6 @@ Then the following simple query can be executed with a |Option |Description |`_name` |Optional name field to identify the filter -|`coerce` |Set to `true` to normalize longitude and latitude values to a -standard -180:180 / -90:90 coordinate system. (default is `false`). - |`ignore_malformed` |Set to `true` to accept geo points with invalid latitude or longitude (default is `false`). 
diff --git a/docs/reference/query-dsl/geo-distance-query.asciidoc b/docs/reference/query-dsl/geo-distance-query.asciidoc index c5b6029dc2f..7ea380bdad2 100644 --- a/docs/reference/query-dsl/geo-distance-query.asciidoc +++ b/docs/reference/query-dsl/geo-distance-query.asciidoc @@ -162,11 +162,6 @@ The following are options allowed on the filter: Optional name field to identify the query -`coerce`:: - - Set to `true` to normalize longitude and latitude values to a standard -180:180 / -90:90 - coordinate system. (default is `false`). - `ignore_malformed`:: Set to `true` to accept geo points with invalid latitude or diff --git a/docs/reference/query-dsl/geo-polygon-query.asciidoc b/docs/reference/query-dsl/geo-polygon-query.asciidoc index 306b2dd2d84..269aeed09ca 100644 --- a/docs/reference/query-dsl/geo-polygon-query.asciidoc +++ b/docs/reference/query-dsl/geo-polygon-query.asciidoc @@ -34,9 +34,6 @@ points. Here is an example: |Option |Description |`_name` |Optional name field to identify the filter -|`coerce` |Set to `true` to normalize longitude and latitude values to a -standard -180:180 / -90:90 coordinate system. (default is `false`). - |`ignore_malformed` |Set to `true` to accept geo points with invalid latitude or longitude (default is `false`). |======================================================================= diff --git a/docs/reference/setup/configuration.asciidoc b/docs/reference/setup/configuration.asciidoc index 03037207fb0..bef563cd965 100644 --- a/docs/reference/setup/configuration.asciidoc +++ b/docs/reference/setup/configuration.asciidoc @@ -43,6 +43,13 @@ using the <> API, with: curl localhost:9200/_nodes/stats/process?pretty -------------------------------------------------- +[float] +[[max-number-of-threads]] +==== Number of threads + +Make sure that the number of threads that the Elasticsearch user can +create is at least 2048. 
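The new documentation paragraph above states the thread requirement without showing how a shortage manifests. As a rough, hedged illustration that is not part of the patch: when the operating system's per-user thread limit is too low, the JVM fails to start new threads with an OutOfMemoryError, which is the symptom Elasticsearch runs into when fewer than roughly 2048 threads are available to its user.

// Rough illustration (not part of the patch): if the OS limit on threads for the current
// user is too low, Thread.start() eventually fails with
// "java.lang.OutOfMemoryError: unable to create new native thread".
public class ThreadLimitIllustration {
    public static void main(String[] args) {
        int started = 0;
        try {
            for (int i = 0; i < 2048; i++) {
                Thread t = new Thread(() -> {
                    try {
                        Thread.sleep(60_000); // keep the thread alive so the count adds up
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                    }
                });
                t.setDaemon(true);
                t.start();
                started++;
            }
            System.out.println("Started " + started + " threads; the limit is high enough");
        } catch (OutOfMemoryError e) {
            System.out.println("Hit the OS thread limit after " + started + " threads: " + e.getMessage());
        }
    }
}

On Linux the limit is typically raised in the limits configuration for the user that runs Elasticsearch.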
+ [float] [[vm-max-map-count]] ==== Virtual memory diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/GeoShapeIntegrationTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/GeoShapeIntegrationTests.java index 98a23b3e1fd..8a86a0a1fb4 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/GeoShapeIntegrationTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/GeoShapeIntegrationTests.java @@ -76,7 +76,7 @@ public class GeoShapeIntegrationTests extends ESIntegTestCase { // left orientation test IndicesService indicesService = internalCluster().getInstance(IndicesService.class, findNodeName(idxName)); - IndexService indexService = indicesService.indexService(idxName); + IndexService indexService = indicesService.indexService(resolveIndex(idxName)); MappedFieldType fieldType = indexService.mapperService().fullName("location"); assertThat(fieldType, instanceOf(GeoShapeFieldMapper.GeoShapeFieldType.class)); @@ -88,7 +88,7 @@ public class GeoShapeIntegrationTests extends ESIntegTestCase { // right orientation test indicesService = internalCluster().getInstance(IndicesService.class, findNodeName(idxName+"2")); - indexService = indicesService.indexService(idxName+"2"); + indexService = indicesService.indexService(resolveIndex((idxName+"2"))); fieldType = indexService.mapperService().fullName("location"); assertThat(fieldType, instanceOf(GeoShapeFieldMapper.GeoShapeFieldType.class)); diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java index 1ac6117d02b..d51fb7e8bc1 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java @@ -19,19 +19,31 @@ package org.elasticsearch.index.reindex; +import java.io.IOException; +import java.util.Arrays; +import java.util.List; + import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.CompositeIndicesRequest; +import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.uid.Versions; -import java.io.IOException; +import static java.util.Collections.unmodifiableList; import static org.elasticsearch.action.ValidateActions.addValidationError; import static org.elasticsearch.index.VersionType.INTERNAL; -public class ReindexRequest extends AbstractBulkIndexByScrollRequest { +/** + * Request to reindex some documents from one index to another. This implements CompositeIndicesRequest but in a misleading way. Rather than + * returning all the subrequests that it will make, it tries to return a representative set of subrequests. This is best-effort for a bunch + * of reasons, not least of which is that scripts are allowed to change the destination request in drastic ways, including changing the index + * to which documents are written. + */ +public class ReindexRequest extends AbstractBulkIndexByScrollRequest implements CompositeIndicesRequest { /** * Prototype for index requests. 
*/ @@ -123,4 +135,20 @@ public class ReindexRequest extends AbstractBulkIndexByScrollRequestnot + * accurate since it returns a prototype {@link IndexRequest} and not the actual requests that will be issued as part of the + * execution of this request. Additionally, scripts can modify the underlying {@link IndexRequest} and change values such as the index, + * type, or {@link org.elasticsearch.action.support.IndicesOptions}. In short - only use this for very coarse reasoning about the request. + * + * @return a list comprising the {@link SearchRequest} and the prototype {@link IndexRequest} + */ + @Override + public List subRequests() { + assert getSearchRequest() != null; + assert getDestination() != null; + return unmodifiableList(Arrays.asList(getSearchRequest(), getDestination())); + } } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java index b2775393877..915921d6077 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java @@ -19,13 +19,23 @@ package org.elasticsearch.index.reindex; +import java.util.ArrayList; +import java.util.List; + +import org.elasticsearch.action.CompositeIndicesRequest; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; +import static java.util.Collections.unmodifiableList; + /** - * Request to reindex a set of documents where they are without changing their - * locations or IDs. + * Request to update some documents. That means you can't change their type, id, index, or anything like that. This implements + * CompositeIndicesRequest but in a misleading way. Rather than returning all the subrequests that it will make, it tries to return a + * representative set of subrequests. This is best-effort but better than {@linkplain ReindexRequest} because scripts can't change the + * destination index and things. */ -public class UpdateByQueryRequest extends AbstractBulkIndexByScrollRequest { +public class UpdateByQueryRequest extends AbstractBulkIndexByScrollRequest implements CompositeIndicesRequest { /** * Ingest pipeline to set on index requests made by this action. */ @@ -64,4 +74,26 @@ public class UpdateByQueryRequest extends AbstractBulkIndexByScrollRequestnot + * accurate since it returns dummy {@link IndexRequest}s and not the actual requests that will be issued as part of the + * execution of this request. + * + * @return a list comprising the {@link SearchRequest} and dummy {@link IndexRequest}s + */ + @Override + public List subRequests() { + assert getSearchRequest() != null; + List subRequests = new ArrayList<>(); + // One dummy IndexRequest per destination index. 
+ for (String index : getSearchRequest().indices()) { + IndexRequest request = new IndexRequest(); + request.index(index); + subRequests.add(request); + } + subRequests.add(getSearchRequest()); + return unmodifiableList(subRequests); + }; } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryRequestTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryRequestTests.java new file mode 100644 index 00000000000..f6780729143 --- /dev/null +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryRequestTests.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.reindex; + +import java.util.List; + +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.test.ESTestCase; + +import static org.apache.lucene.util.TestUtil.randomSimpleString; +import static org.hamcrest.Matchers.arrayWithSize; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.sameInstance; + +public class UpdateByQueryRequestTests extends ESTestCase { + public void testUpdateByQueryRequestImplementsCompositeIndicesRequestWithDummies() { + int numIndices = between(1, 100); + String[] indices = new String[numIndices]; + for (int i = 0; i < numIndices; i++) { + indices[i] = randomSimpleString(random(), 1, 30); + } + UpdateByQueryRequest request = new UpdateByQueryRequest(new SearchRequest(indices)); + List subRequests = request.subRequests(); + assertThat(subRequests, hasSize(numIndices + 1)); + for (int i = 0; i < numIndices; i++) { + assertThat(subRequests.get(i).indices(), arrayWithSize(1)); + assertEquals(indices[i], subRequests.get(i).indices()[0]); + } + assertThat(subRequests.get(numIndices), sameInstance(request.getSearchRequest())); + } +} diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yaml index c23e5da95a1..7f84c1aac8b 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yaml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yaml @@ -58,9 +58,6 @@ --- "wait_for_completion=false": - - skip: - version: "0.0.0 - " - reason: breaks other tests by leaving a running reindex behind - do: index: index: source @@ -79,6 +76,7 @@ dest: index: dest - match: {task: '/.+:\d+/'} + - set: {task: task} - is_false: updated - is_false: version_conflicts - is_false: batches @@ -87,6 +85,11 @@ - is_false: took - is_false: created + - do: + tasks.list: + wait_for_completion: true + task_id: $task + --- "Response format for version conflict": - do: diff --git 
a/modules/reindex/src/test/resources/rest-api-spec/test/update-by-query/10_basic.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/update-by-query/10_basic.yaml index 383e945bbf2..94ffa2349a9 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/update-by-query/10_basic.yaml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/update-by-query/10_basic.yaml @@ -37,6 +37,7 @@ wait_for_completion: false index: test - match: {task: '/.+:\d+/'} + - set: {task: task} - is_false: updated - is_false: version_conflicts - is_false: batches @@ -45,6 +46,11 @@ - is_false: took - is_false: created + - do: + tasks.list: + wait_for_completion: true + task_id: $task + --- "Response for version conflict": - do: diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuTransformTokenFilterFactory.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuTransformTokenFilterFactory.java index 6ecdf3888e9..f145ad4ae30 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuTransformTokenFilterFactory.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuTransformTokenFilterFactory.java @@ -22,7 +22,6 @@ package org.elasticsearch.index.analysis; import com.ibm.icu.text.Transliterator; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.icu.ICUTransformFilter; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; @@ -36,7 +35,6 @@ public class IcuTransformTokenFilterFactory extends AbstractTokenFilterFactory { private final int dir; private final Transliterator transliterator; - @Inject public IcuTransformTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); this.id = settings.get("id", "Null"); diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiAnalyzerProvider.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiAnalyzerProvider.java index 8aa8ff3c1dd..21d9b804055 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiAnalyzerProvider.java +++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiAnalyzerProvider.java @@ -23,7 +23,6 @@ import org.apache.lucene.analysis.ja.JapaneseAnalyzer; import org.apache.lucene.analysis.ja.JapaneseTokenizer; import org.apache.lucene.analysis.ja.dict.UserDictionary; import org.apache.lucene.analysis.util.CharArraySet; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; @@ -36,7 +35,6 @@ public class KuromojiAnalyzerProvider extends AbstractIndexAnalyzerProvider stopWords = Analysis.parseStopWords(env, settings, JapaneseAnalyzer.getDefaultStopSet()); diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiBaseFormFilterFactory.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiBaseFormFilterFactory.java index e191d78198f..aa035d9edfd 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiBaseFormFilterFactory.java +++ 
b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiBaseFormFilterFactory.java @@ -21,14 +21,12 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.ja.JapaneseBaseFormFilter; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; public class KuromojiBaseFormFilterFactory extends AbstractTokenFilterFactory { - @Inject public KuromojiBaseFormFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); } diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiKatakanaStemmerFactory.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiKatakanaStemmerFactory.java index ebebdcb6bba..491f48e34c1 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiKatakanaStemmerFactory.java +++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiKatakanaStemmerFactory.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.ja.JapaneseKatakanaStemFilter; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; @@ -30,7 +29,6 @@ public class KuromojiKatakanaStemmerFactory extends AbstractTokenFilterFactory { private final int minimumLength; - @Inject public KuromojiKatakanaStemmerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); minimumLength = settings.getAsInt("minimum_length", JapaneseKatakanaStemFilter.DEFAULT_MINIMUM_LENGTH); diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiReadingFormFilterFactory.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiReadingFormFilterFactory.java index 59d1088fd1b..d0eb0cecdb9 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiReadingFormFilterFactory.java +++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiReadingFormFilterFactory.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.ja.JapaneseReadingFormFilter; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; @@ -30,7 +29,6 @@ public class KuromojiReadingFormFilterFactory extends AbstractTokenFilterFactory private final boolean useRomaji; - @Inject public KuromojiReadingFormFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); useRomaji = settings.getAsBoolean("use_romaji", false); diff --git a/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/PhoneticTokenFilterFactory.java b/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/PhoneticTokenFilterFactory.java index e33f1f1e7e2..75da19c0a3c 100644 --- 
a/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/PhoneticTokenFilterFactory.java +++ b/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/PhoneticTokenFilterFactory.java @@ -38,7 +38,6 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.phonetic.BeiderMorseFilter; import org.apache.lucene.analysis.phonetic.DoubleMetaphoneFilter; import org.apache.lucene.analysis.phonetic.PhoneticFilter; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; @@ -58,7 +57,6 @@ public class PhoneticTokenFilterFactory extends AbstractTokenFilterFactory { private NameType nametype; private RuleType ruletype; - @Inject public PhoneticTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); this.languageset = null; diff --git a/plugins/analysis-smartcn/src/main/java/org/elasticsearch/index/analysis/SmartChineseAnalyzerProvider.java b/plugins/analysis-smartcn/src/main/java/org/elasticsearch/index/analysis/SmartChineseAnalyzerProvider.java index 22fcf238725..591912b8fa3 100644 --- a/plugins/analysis-smartcn/src/main/java/org/elasticsearch/index/analysis/SmartChineseAnalyzerProvider.java +++ b/plugins/analysis-smartcn/src/main/java/org/elasticsearch/index/analysis/SmartChineseAnalyzerProvider.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.cn.smart.SmartChineseAnalyzer; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; @@ -31,7 +30,6 @@ public class SmartChineseAnalyzerProvider extends AbstractIndexAnalyzerProvider< private final SmartChineseAnalyzer analyzer; - @Inject public SmartChineseAnalyzerProvider(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.list.json b/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.list.json index 7e8683b3475..5cdeed1b142 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.list.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.list.json @@ -31,6 +31,10 @@ "parent_task": { "type" : "number", "description" : "Return tasks with specified parent task id. Set to -1 to return all." 
+ }, + "wait_for_completion": { + "type": "boolean", + "description": "Wait for the matching tasks to complete (default: false)" } } }, diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index a3161f4090f..aea35a3acd0 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -41,6 +41,7 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsResponse; import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; +import org.elasticsearch.action.admin.indices.get.GetIndexResponse; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse; @@ -95,6 +96,7 @@ import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.env.Environment; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.MockEngineFactoryPlugin; import org.elasticsearch.index.IndexSettings; @@ -836,7 +838,7 @@ public abstract class ESIntegTestCase extends ESTestCase { assertThat(nodes, Matchers.not(Matchers.emptyIterable())); for (String node : nodes) { IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node); - IndexService indexService = indicesService.indexService(index); + IndexService indexService = indicesService.indexService(resolveIndex(index)); assertThat("index service doesn't exists on " + node, indexService, notNullValue()); DocumentMapper documentMapper = indexService.mapperService().documentMapper(type); assertThat("document mapper doesn't exists on " + node, documentMapper, notNullValue()); @@ -2041,7 +2043,7 @@ public abstract class ESIntegTestCase extends ESTestCase { * of the provided index. 
*/ protected String routingKeyForShard(String index, String type, int shard) { - return internalCluster().routingKeyForShard(index, type, shard, getRandom()); + return internalCluster().routingKeyForShard(resolveIndex(index), type, shard, getRandom()); } /** @@ -2144,4 +2146,11 @@ public abstract class ESIntegTestCase extends ESTestCase { public @interface SuppressNetworkMode { } + public static Index resolveIndex(String index) { + GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().setIndices(index).get(); + assertTrue("index " + index + " not found", getIndexResponse.getSettings().containsKey(index)); + String uuid = getIndexResponse.getSettings().get(index).get(IndexMetaData.SETTING_INDEX_UUID); + return new Index(index, uuid); + } + } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java index fc713400262..6e16d60eafc 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java @@ -22,6 +22,7 @@ import org.apache.lucene.util.IOUtils; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; +import org.elasticsearch.action.admin.indices.get.GetIndexResponse; import org.elasticsearch.cache.recycler.PageCacheRecycler; import org.elasticsearch.client.Client; import org.elasticsearch.client.Requests; @@ -38,6 +39,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.env.Environment; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.node.MockNode; @@ -255,7 +257,14 @@ public abstract class ESSingleNodeTestCase extends ESTestCase { assertThat(health.getStatus(), lessThanOrEqualTo(ClusterHealthStatus.YELLOW)); assertThat("Cluster must be a single node cluster", health.getNumberOfDataNodes(), equalTo(1)); IndicesService instanceFromNode = getInstanceFromNode(IndicesService.class); - return instanceFromNode.indexServiceSafe(index); + return instanceFromNode.indexServiceSafe(resolveIndex(index)); + } + + public Index resolveIndex(String index) { + GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().setIndices(index).get(); + assertTrue("index " + index + " not found", getIndexResponse.getSettings().containsKey(index)); + String uuid = getIndexResponse.getSettings().get(index).get(IndexMetaData.SETTING_INDEX_UUID); + return new Index(index, uuid); } /** diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 04548eb85c9..82c7db11d69 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -66,6 +66,7 @@ import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexModule; import 
org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.CommitStats; @@ -1697,7 +1698,7 @@ public final class InternalTestCluster extends TestCluster { } } - synchronized String routingKeyForShard(String index, String type, int shard, Random random) { + synchronized String routingKeyForShard(Index index, String type, int shard, Random random) { assertThat(shard, greaterThanOrEqualTo(0)); assertThat(shard, greaterThanOrEqualTo(0)); for (NodeAndClient n : nodes.values()) { @@ -1710,7 +1711,7 @@ public final class InternalTestCluster extends TestCluster { OperationRouting operationRouting = getInstanceFromNode(OperationRouting.class, node); while (true) { String routing = RandomStrings.randomAsciiOfLength(random, 10); - final int targetShard = operationRouting.indexShards(clusterService.state(), index, type, null, routing).shardId().getId(); + final int targetShard = operationRouting.indexShards(clusterService.state(), index.getName(), type, null, routing).shardId().getId(); if (shard == targetShard) { return routing; } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index 5684717342d..fbc518b136d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -19,14 +19,34 @@ package org.elasticsearch.test.rest; -import com.carrotsearch.randomizedtesting.RandomizedTest; +import java.io.IOException; +import java.io.InputStream; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.nio.file.FileSystem; +import java.nio.file.FileSystems; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + import org.apache.lucene.util.IOUtils; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction; import org.elasticsearch.common.Strings; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.client.RestException; +import org.elasticsearch.test.rest.client.RestResponse; import org.elasticsearch.test.rest.parser.RestTestParseException; import org.elasticsearch.test.rest.parser.RestTestSuiteParser; import org.elasticsearch.test.rest.section.DoSection; @@ -42,24 +62,11 @@ import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; -import java.io.IOException; -import java.io.InputStream; -import java.net.InetSocketAddress; -import java.net.URI; -import java.net.URISyntaxException; -import java.net.URL; -import java.nio.file.FileSystem; -import java.nio.file.FileSystems; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.StandardCopyOption; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; +import com.carrotsearch.randomizedtesting.RandomizedTest; + +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static 
java.util.Collections.sort; /** * Runs the clients test suite against an elasticsearch cluster. @@ -261,7 +268,6 @@ public abstract class ESRestTestCase extends ESTestCase { @After public void wipeCluster() throws Exception { - // wipe indices Map deleteIndicesArgs = new HashMap<>(); deleteIndicesArgs.put("index", "*"); @@ -285,6 +291,30 @@ public abstract class ESRestTestCase extends ESTestCase { adminExecutionContext.callApi("snapshot.delete_repository", deleteSnapshotsArgs, Collections.emptyList(), Collections.emptyMap()); } + /** + * Logs a message if there are still running tasks. The reasoning is that any tasks still running are state that is trying to bleed into + * other tests. + */ + @After + public void logIfThereAreRunningTasks() throws InterruptedException, IOException, RestException { + RestResponse tasks = adminExecutionContext.callApi("tasks.list", emptyMap(), emptyList(), emptyMap()); + Set runningTasks = runningTasks(tasks); + // Ignore the task list API - it doesn't count against us + runningTasks.remove(ListTasksAction.NAME); + runningTasks.remove(ListTasksAction.NAME + "[n]"); + if (runningTasks.isEmpty()) { + return; + } + List stillRunning = new ArrayList<>(runningTasks); + sort(stillRunning); + logger.info("There are still tasks running after this test that might break subsequent tests {}.", stillRunning); + /* + * This isn't a higher level log or outright failure because some of these tasks are run by the cluster in the background. If we + * could determine that some tasks are run by the user we'd fail the tests if those tasks were running and ignore any background + * tasks. + */ + } + @AfterClass public static void close() { if (restTestExecutionContext != null) { @@ -365,4 +395,19 @@ public abstract class ESRestTestCase extends ESTestCase { executableSection.execute(restTestExecutionContext); } } + + @SuppressWarnings("unchecked") + public Set runningTasks(RestResponse response) throws IOException { + Set runningTasks = new HashSet<>(); + Map nodes = (Map) response.evaluate("nodes"); + for (Map.Entry node : nodes.entrySet()) { + Map nodeInfo = (Map) node.getValue(); + Map nodeTasks = (Map) nodeInfo.get("tasks"); + for (Map.Entry taskAndName : nodeTasks.entrySet()) { + Map task = (Map) taskAndName.getValue(); + runningTasks.add(task.get("action").toString()); + } + } + return runningTasks; + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java index 6a484e9ae69..79f7502fb27 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java @@ -114,9 +114,10 @@ public class HttpRequestBuilder { for (String pathPart : path) { try { finalPath.append('/'); - URI uri = new URI(null, null, null, -1, pathPart, null, null); + // We append "/" to the path part to handle parts that start with - or other invalid characters + URI uri = new URI(null, null, null, -1, "/" + pathPart, null, null); //manually escape any slash that each part may contain - finalPath.append(uri.getRawPath().replaceAll("/", "%2F")); + finalPath.append(uri.getRawPath().substring(1).replaceAll("/", "%2F")); } catch(URISyntaxException e) { throw new RuntimeException("unable to build uri", e); }