Merge branch 'master' into more_tribe_node_settings

This commit is contained in: more_tribe_node_settings
Ryan Ernst 2016-03-09 00:19:43 -08:00
commit b419a50381
57 changed files with 840 additions and 329 deletions

View File

@@ -53,12 +53,18 @@ public class CancelTasksRequest extends BaseTasksRequest<CancelTasksRequest> {
return super.match(task) && task instanceof CancellableTask;
}
public CancelTasksRequest reason(String reason) {
/**
* Set the reason for canceling the task.
*/
public CancelTasksRequest setReason(String reason) {
this.reason = reason;
return this;
}
public String reason() {
/**
* The reason for canceling the task.
*/
public String getReason() {
return reason;
}
}
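The rename above moves CancelTasksRequest from fluent reason()/reason(String) accessors to getter/setter names while keeping the chainable builder style. A minimal usage sketch (not from this commit; the "node:taskId" value is made up):

    CancelTasksRequest request = new CancelTasksRequest()
        .setTaskId(new TaskId("nodeId:42"))       // hypothetical "node:taskId" value
        .setReason("shutting down the node");
    assert "shutting down the node".equals(request.getReason());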

View File

@@ -84,21 +84,21 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
}
protected void processTasks(CancelTasksRequest request, Consumer<CancellableTask> operation) {
if (request.taskId().isSet() == false) {
if (request.getTaskId().isSet() == false) {
// we are only checking one task, we can optimize it
CancellableTask task = taskManager.getCancellableTask(request.taskId().getId());
CancellableTask task = taskManager.getCancellableTask(request.getTaskId().getId());
if (task != null) {
if (request.match(task)) {
operation.accept(task);
} else {
throw new IllegalArgumentException("task [" + request.taskId() + "] doesn't support this operation");
throw new IllegalArgumentException("task [" + request.getTaskId() + "] doesn't support this operation");
}
} else {
if (taskManager.getTask(request.taskId().getId()) != null) {
if (taskManager.getTask(request.getTaskId().getId()) != null) {
// The task exists, but doesn't support cancellation
throw new IllegalArgumentException("task [" + request.taskId() + "] doesn't support cancellation");
throw new IllegalArgumentException("task [" + request.getTaskId() + "] doesn't support cancellation");
} else {
throw new ResourceNotFoundException("task [{}] doesn't support cancellation", request.taskId());
throw new ResourceNotFoundException("task [{}] doesn't support cancellation", request.getTaskId());
}
}
} else {
@@ -113,14 +113,14 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
@Override
protected synchronized TaskInfo taskOperation(CancelTasksRequest request, CancellableTask cancellableTask) {
final BanLock banLock = new BanLock(nodes -> removeBanOnNodes(cancellableTask, nodes));
Set<String> childNodes = taskManager.cancel(cancellableTask, request.reason(), banLock::onTaskFinished);
Set<String> childNodes = taskManager.cancel(cancellableTask, request.getReason(), banLock::onTaskFinished);
if (childNodes != null) {
if (childNodes.isEmpty()) {
logger.trace("cancelling task {} with no children", cancellableTask.getId());
return cancellableTask.taskInfo(clusterService.localNode(), false);
} else {
logger.trace("cancelling task {} with children on nodes [{}]", cancellableTask.getId(), childNodes);
setBanOnNodes(request.reason(), cancellableTask, childNodes, banLock);
setBanOnNodes(request.getReason(), cancellableTask, childNodes, banLock);
return cancellableTask.taskInfo(clusterService.localNode(), false);
}
} else {

View File

@@ -31,31 +31,49 @@ import java.io.IOException;
public class ListTasksRequest extends BaseTasksRequest<ListTasksRequest> {
private boolean detailed = false;
private boolean waitForCompletion = false;
/**
* Should the detailed task information be returned.
*/
public boolean detailed() {
public boolean getDetailed() {
return this.detailed;
}
/**
* Should the detailed task information be returned.
*/
public ListTasksRequest detailed(boolean detailed) {
public ListTasksRequest setDetailed(boolean detailed) {
this.detailed = detailed;
return this;
}
/**
* Should this request wait for all found tasks to complete?
*/
public boolean getWaitForCompletion() {
return waitForCompletion;
}
/**
* Should this request wait for all found tasks to complete?
*/
public ListTasksRequest setWaitForCompletion(boolean waitForCompletion) {
this.waitForCompletion = waitForCompletion;
return this;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
detailed = in.readBoolean();
waitForCompletion = in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeBoolean(detailed);
out.writeBoolean(waitForCompletion);
}
}
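A hedged sketch of the new fluent setters in use (illustrative, not from this commit):

    ListTasksRequest request = new ListTasksRequest()
        .setDetailed(true)            // include per-task descriptions
        .setWaitForCompletion(true);  // block until every matching task has finished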

View File

@@ -35,7 +35,15 @@ public class ListTasksRequestBuilder extends TasksRequestBuilder<ListTasksReques
* Should detailed task information be returned.
*/
public ListTasksRequestBuilder setDetailed(boolean detailed) {
request.detailed(detailed);
request.setDetailed(detailed);
return this;
}
/**
* Should this request wait for all found tasks to complete?
*/
public final ListTasksRequestBuilder setWaitForCompletion(boolean waitForCompletion) {
request.setWaitForCompletion(waitForCompletion);
return this;
}
}

View File

@@ -19,6 +19,8 @@
package org.elasticsearch.action.admin.cluster.node.tasks.list;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.support.ActionFilters;
@@ -29,18 +31,24 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.function.Consumer;
import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
/**
*
*/
public class TransportListTasksAction extends TransportTasksAction<Task, ListTasksRequest, ListTasksResponse, TaskInfo> {
private static final TimeValue WAIT_FOR_COMPLETION_POLL = timeValueMillis(100);
private static final TimeValue DEFAULT_WAIT_FOR_COMPLETION_TIMEOUT = timeValueSeconds(30);
@Inject
public TransportListTasksAction(Settings settings, ClusterName clusterName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
@@ -59,7 +67,34 @@ public class TransportListTasksAction extends TransportTasksAction<Task, ListTas
@Override
protected TaskInfo taskOperation(ListTasksRequest request, Task task) {
return task.taskInfo(clusterService.localNode(), request.detailed());
return task.taskInfo(clusterService.localNode(), request.getDetailed());
}
@Override
protected void processTasks(ListTasksRequest request, Consumer<Task> operation) {
if (false == request.getWaitForCompletion()) {
super.processTasks(request, operation);
return;
}
// If we should wait for completion then we have to intercept every found task and wait for it to leave the manager.
TimeValue timeout = request.getTimeout();
if (timeout == null) {
timeout = DEFAULT_WAIT_FOR_COMPLETION_TIMEOUT;
}
long timeoutTime = System.nanoTime() + timeout.nanos();
super.processTasks(request, operation.andThen((Task t) -> {
while (System.nanoTime() - timeoutTime < 0) {
if (taskManager.getTask(t.getId()) == null) {
return;
}
try {
Thread.sleep(WAIT_FOR_COMPLETION_POLL.millis());
} catch (InterruptedException e) {
throw new ElasticsearchException("Interrupted waiting for completion of [{}]", e, t);
}
}
throw new ElasticsearchTimeoutException("Timed out waiting for completion of [{}]", t);
}));
}
@Override
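The loop above polls the task manager every 100ms (WAIT_FOR_COMPLETION_POLL) until each matched task has left it, using the overflow-safe System.nanoTime() difference comparison for the deadline, and falls back to DEFAULT_WAIT_FOR_COMPLETION_TIMEOUT (30s) when the request carries no timeout. A client-side sketch of the behavior, adapted from the tests added later in this commit:

    ListTasksResponse response = client().admin().cluster().prepareListTasks()
        .setActions(TestTaskPlugin.TestTaskAction.NAME + "[n]")
        .setWaitForCompletion(true)
        .setTimeout(timeValueMillis(100))   // without this, the 30s default applies
        .get();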

View File

@@ -71,7 +71,7 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
* Sets the list of action masks for the actions that should be returned
*/
@SuppressWarnings("unchecked")
public final Request actions(String... actions) {
public final Request setActions(String... actions) {
this.actions = actions;
return (Request) this;
}
@@ -79,16 +79,16 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
/**
* Return the list of action masks for the actions that should be returned
*/
public String[] actions() {
public String[] getActions() {
return actions;
}
public final String[] nodesIds() {
public final String[] getNodesIds() {
return nodesIds;
}
@SuppressWarnings("unchecked")
public final Request nodesIds(String... nodesIds) {
public final Request setNodesIds(String... nodesIds) {
this.nodesIds = nodesIds;
return (Request) this;
}
@@ -98,12 +98,12 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
*
* By default tasks with any ids are returned.
*/
public TaskId taskId() {
public TaskId getTaskId() {
return taskId;
}
@SuppressWarnings("unchecked")
public final Request taskId(TaskId taskId) {
public final Request setTaskId(TaskId taskId) {
this.taskId = taskId;
return (Request) this;
}
@@ -112,29 +112,29 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
/**
* Returns the parent task id that tasks should be filtered by
*/
public TaskId parentTaskId() {
public TaskId getParentTaskId() {
return parentTaskId;
}
@SuppressWarnings("unchecked")
public Request parentTaskId(TaskId parentTaskId) {
public Request setParentTaskId(TaskId parentTaskId) {
this.parentTaskId = parentTaskId;
return (Request) this;
}
public TimeValue timeout() {
public TimeValue getTimeout() {
return this.timeout;
}
@SuppressWarnings("unchecked")
public final Request timeout(TimeValue timeout) {
public final Request setTimeout(TimeValue timeout) {
this.timeout = timeout;
return (Request) this;
}
@SuppressWarnings("unchecked")
public final Request timeout(String timeout) {
public final Request setTimeout(String timeout) {
this.timeout = TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".timeout");
return (Request) this;
}
@@ -162,11 +162,11 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
}
public boolean match(Task task) {
if (actions() != null && actions().length > 0 && Regex.simpleMatch(actions(), task.getAction()) == false) {
if (getActions() != null && getActions().length > 0 && Regex.simpleMatch(getActions(), task.getAction()) == false) {
return false;
}
if (taskId().isSet() == false) {
if(taskId().getId() != task.getId()) {
if (getTaskId().isSet() == false) {
if(getTaskId().getId() != task.getId()) {
return false;
}
}

View File

@@ -35,19 +35,19 @@ public class TasksRequestBuilder <Request extends BaseTasksRequest<Request>, Res
@SuppressWarnings("unchecked")
public final RequestBuilder setNodesIds(String... nodesIds) {
request.nodesIds(nodesIds);
request.setNodesIds(nodesIds);
return (RequestBuilder) this;
}
@SuppressWarnings("unchecked")
public final RequestBuilder setActions(String... actions) {
request.actions(actions);
request.setActions(actions);
return (RequestBuilder) this;
}
@SuppressWarnings("unchecked")
public final RequestBuilder setTimeout(TimeValue timeout) {
request.timeout(timeout);
request.setTimeout(timeout);
return (RequestBuilder) this;
}
}

View File

@@ -124,25 +124,25 @@ public abstract class TransportTasksAction<
}
protected String[] resolveNodes(TasksRequest request, ClusterState clusterState) {
if (request.taskId().isSet()) {
return clusterState.nodes().resolveNodesIds(request.nodesIds());
if (request.getTaskId().isSet()) {
return clusterState.nodes().resolveNodesIds(request.getNodesIds());
} else {
return new String[]{request.taskId().getNodeId()};
return new String[]{request.getTaskId().getNodeId()};
}
}
protected void processTasks(TasksRequest request, Consumer<OperationTask> operation) {
if (request.taskId().isSet() == false) {
if (request.getTaskId().isSet() == false) {
// we are only checking one task, we can optimize it
Task task = taskManager.getTask(request.taskId().getId());
Task task = taskManager.getTask(request.getTaskId().getId());
if (task != null) {
if (request.match(task)) {
operation.accept((OperationTask) task);
} else {
throw new ResourceNotFoundException("task [{}] doesn't support this operation", request.taskId());
throw new ResourceNotFoundException("task [{}] doesn't support this operation", request.getTaskId());
}
} else {
throw new ResourceNotFoundException("task [{}] is missing", request.taskId());
throw new ResourceNotFoundException("task [{}] is missing", request.getTaskId());
}
} else {
for (Task task : taskManager.getTasks().values()) {
@@ -224,8 +224,8 @@ public abstract class TransportTasksAction<
}
} else {
TransportRequestOptions.Builder builder = TransportRequestOptions.builder();
if (request.timeout() != null) {
builder.withTimeout(request.timeout());
if (request.getTimeout() != null) {
builder.withTimeout(request.getTimeout());
}
builder.withCompress(transportCompress());
for (int i = 0; i < nodesIds.length; i++) {

View File

@@ -225,7 +225,7 @@ final class BootstrapCheck {
static class MaxNumberOfThreadsCheck implements Check {
private final long maxNumberOfThreadsThreshold = 1 << 15;
private final long maxNumberOfThreadsThreshold = 1 << 11;
@Override
public boolean check() {

View File

@@ -19,12 +19,14 @@
package org.elasticsearch.cluster.metadata;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.close.CloseIndexClusterStateUpdateRequest;
import org.elasticsearch.action.admin.indices.open.OpenIndexClusterStateUpdateRequest;
import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.RestoreInProgress;
import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
@@ -37,11 +39,14 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
/**
* Service responsible for submitting open/close index requests
@@ -78,7 +83,7 @@ public class MetaDataIndexStateService extends AbstractComponent {
@Override
public ClusterState execute(ClusterState currentState) {
List<String> indicesToClose = new ArrayList<>();
Set<String> indicesToClose = new HashSet<>();
for (String index : request.indices()) {
IndexMetaData indexMetaData = currentState.metaData().index(index);
if (indexMetaData == null) {
@@ -94,6 +99,28 @@
return currentState;
}
// Check if any of the indices to be closed are currently being restored from a snapshot and fail closing if such an index
// is found as closing an index that is being restored makes the index unusable (it cannot be recovered).
RestoreInProgress restore = currentState.custom(RestoreInProgress.TYPE);
if (restore != null) {
Set<String> indicesToFail = null;
for (RestoreInProgress.Entry entry : restore.entries()) {
for (ObjectObjectCursor<ShardId, RestoreInProgress.ShardRestoreStatus> shard : entry.shards()) {
if (!shard.value.state().completed()) {
if (indicesToClose.contains(shard.key.getIndexName())) {
if (indicesToFail == null) {
indicesToFail = new HashSet<>();
}
indicesToFail.add(shard.key.getIndexName());
}
}
}
}
if (indicesToFail != null) {
throw new IllegalArgumentException("Cannot close indices that are being restored: " + indicesToFail);
}
}
logger.info("closing indices [{}]", indicesAsString);
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());

View File

@@ -18,26 +18,23 @@
*/
package org.elasticsearch.common;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import java.util.EnumSet;
import java.util.HashSet;
/**
* Holds a field that can be found in a request while parsing and its different variants, which may be deprecated.
*/
public class ParseField {
private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(ParseField.class));
private final String camelCaseName;
private final String underscoreName;
private final String[] deprecatedNames;
private String allReplacedWith = null;
static final EnumSet<Flag> EMPTY_FLAGS = EnumSet.noneOf(Flag.class);
static final EnumSet<Flag> STRICT_FLAGS = EnumSet.of(Flag.STRICT);
enum Flag {
STRICT
}
public ParseField(String value, String... deprecatedNames) {
camelCaseName = Strings.toCamelCase(value);
underscoreName = Strings.toUnderscoreCase(value);
@@ -80,19 +77,21 @@ public class ParseField {
return parseField;
}
boolean match(String currentFieldName, EnumSet<Flag> flags) {
boolean match(String currentFieldName, boolean strict) {
if (allReplacedWith == null && (currentFieldName.equals(camelCaseName) || currentFieldName.equals(underscoreName))) {
return true;
}
String msg;
for (String depName : deprecatedNames) {
if (currentFieldName.equals(depName)) {
if (flags.contains(Flag.STRICT)) {
msg = "Deprecated field [" + currentFieldName + "] used, expected [" + underscoreName + "] instead";
if (allReplacedWith != null) {
msg = "Deprecated field [" + currentFieldName + "] used, replaced by [" + allReplacedWith + "]";
}
msg = "Deprecated field [" + currentFieldName + "] used, expected [" + underscoreName + "] instead";
if (allReplacedWith != null) {
msg = "Deprecated field [" + currentFieldName + "] used, replaced by [" + allReplacedWith + "]";
}
if (strict) {
throw new IllegalArgumentException(msg);
} else {
DEPRECATION_LOGGER.deprecated(msg);
}
return true;
}

View File

@@ -21,29 +21,28 @@ package org.elasticsearch.common;
import org.elasticsearch.common.settings.Settings;
import java.util.EnumSet;
/**
* Matcher to use in combination with {@link ParseField} while parsing requests. Matches a {@link ParseField}
* against a field name and throw deprecation exception depending on the current value of the {@link #PARSE_STRICT} setting.
*/
public class ParseFieldMatcher {
public static final String PARSE_STRICT = "index.query.parse.strict";
public static final ParseFieldMatcher EMPTY = new ParseFieldMatcher(ParseField.EMPTY_FLAGS);
public static final ParseFieldMatcher STRICT = new ParseFieldMatcher(ParseField.STRICT_FLAGS);
public static final ParseFieldMatcher EMPTY = new ParseFieldMatcher(false);
public static final ParseFieldMatcher STRICT = new ParseFieldMatcher(true);
private final EnumSet<ParseField.Flag> parseFlags;
private final boolean strict;
public ParseFieldMatcher(Settings settings) {
if (settings.getAsBoolean(PARSE_STRICT, false)) {
this.parseFlags = EnumSet.of(ParseField.Flag.STRICT);
} else {
this.parseFlags = ParseField.EMPTY_FLAGS;
}
this(settings.getAsBoolean(PARSE_STRICT, false));
}
public ParseFieldMatcher(EnumSet<ParseField.Flag> parseFlags) {
this.parseFlags = parseFlags;
public ParseFieldMatcher(boolean strict) {
this.strict = strict;
}
/** Should deprecated settings be rejected? */
public boolean isStrict() {
return strict;
}
/**
@@ -55,6 +54,6 @@
* @return true whenever the parse field that we are looking for was found, false otherwise
*/
public boolean match(String fieldName, ParseField parseField) {
return parseField.match(fieldName, parseFlags);
return parseField.match(fieldName, strict);
}
}
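A hedged sketch of the simplified strictness switch (the field names are borrowed from the sort-parsing change later in this commit):

    ParseField field = new ParseField("unmapped_type", "ignore_unmapped");
    new ParseFieldMatcher(false).match("ignore_unmapped", field); // true, logs a deprecation warning
    ParseFieldMatcher.STRICT.match("ignore_unmapped", field);     // throws IllegalArgumentException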

View File

@@ -35,6 +35,7 @@ import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.percolator.PercolatorQueriesRegistry;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.index.store.FsDirectoryService;
import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.index.store.Store;
@@ -44,6 +45,7 @@ import org.elasticsearch.indices.IndicesRequestCache;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.function.Predicate;
@@ -133,8 +135,15 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
FsDirectoryService.INDEX_LOCK_FACTOR_SETTING,
EngineConfig.INDEX_CODEC_SETTING,
IndexWarmer.INDEX_NORMS_LOADING_SETTING,
// this sucks but we can't really validate all the analyzers/similarity in here
Setting.groupSetting("index.similarity.", false, Setting.Scope.INDEX), // this allows similarity settings to be passed
// validate that built-in similarities don't get redefined
Setting.groupSetting("index.similarity.", false, Setting.Scope.INDEX, (s) -> {
Map<String, Settings> groups = s.getAsGroups();
for (String key : SimilarityService.BUILT_IN.keySet()) {
if (groups.containsKey(key)) {
throw new IllegalArgumentException("illegal value for [index.similarity."+ key + "] cannot redefine built-in similarity");
}
}
}), // this allows similarity settings to be passed
Setting.groupSetting("index.analysis.", false, Setting.Scope.INDEX) // this allows analysis settings to be passed
)));
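With the validator wired in, index settings that try to redefine a built-in similarity now fail fast. A sketch, assuming "BM25" is among the SimilarityService.BUILT_IN keys:

    Settings bad = Settings.builder()
        .put("index.similarity.BM25.type", "my_custom")  // hypothetical redefinition
        .build();
    // Reading the "index.similarity." group from these settings now throws:
    // IllegalArgumentException: illegal value for [index.similarity.BM25] cannot redefine built-in similarity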

View File

@@ -20,6 +20,7 @@ package org.elasticsearch.common.settings;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.support.ToXContentToBytes;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Strings;
@@ -30,16 +31,19 @@ import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.MemorySizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
@@ -177,7 +181,7 @@ public class Setting<T> extends ToXContentToBytes {
/**
* Returns <code>true</code> iff this setting is present in the given settings object. Otherwise <code>false</code>
*/
public final boolean exists(Settings settings) {
public boolean exists(Settings settings) {
return settings.get(getKey()) != null;
}
@@ -505,17 +509,45 @@
throw new ElasticsearchException(ex);
}
}
public static Setting<Settings> groupSetting(String key, boolean dynamic, Scope scope) {
return groupSetting(key, dynamic, scope, (s) -> {});
}
public static Setting<Settings> groupSetting(String key, boolean dynamic, Scope scope, Consumer<Settings> validator) {
return new Setting<Settings>(new GroupKey(key), (s) -> "", (s) -> null, dynamic, scope) {
@Override
public boolean isGroupSetting() {
return true;
}
@Override
public String getRaw(Settings settings) {
Settings subSettings = get(settings);
try {
XContentBuilder builder = XContentFactory.jsonBuilder();
builder.startObject();
subSettings.toXContent(builder, EMPTY_PARAMS);
builder.endObject();
return builder.string();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Override
public Settings get(Settings settings) {
return settings.getByPrefix(getKey());
Settings byPrefix = settings.getByPrefix(getKey());
validator.accept(byPrefix);
return byPrefix;
}
@Override
public boolean exists(Settings settings) {
for (Map.Entry<String, String> entry : settings.getAsMap().entrySet()) {
if (entry.getKey().startsWith(key)) {
return true;
}
}
return false;
}
@Override
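A hedged sketch of the new validating groupSetting overload (the setting name and rule are illustrative, not from the commit); note that the Consumer runs each time the group is read via get(Settings), and that the keys it sees are relative to the group prefix:

    Setting<Settings> group = Setting.groupSetting("index.mygroup.", false, Setting.Scope.INDEX, settings -> {
        if (settings.getAsMap().containsKey("forbidden")) {  // key is relative to "index.mygroup."
            throw new IllegalArgumentException("[index.mygroup.forbidden] may not be set");
        }
    });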

View File

@@ -26,7 +26,6 @@ import org.elasticsearch.cluster.routing.RoutingService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.node.service.NodeService;
import java.io.IOException;
@@ -41,11 +40,6 @@ public interface Discovery extends LifecycleComponent<Discovery> {
String nodeDescription();
/**
* Here as a hack to solve dep injection problem...
*/
void setNodeService(@Nullable NodeService nodeService);
/**
* Another hack to solve dep injection problem..., note, this will be called before
* any start is called.

View File

@@ -33,7 +33,6 @@ import org.elasticsearch.cluster.routing.RoutingService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.internal.Nullable;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.ClusterSettings;
@@ -45,7 +44,6 @@ import org.elasticsearch.discovery.BlockingClusterStatePublishResponseHandler;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.DiscoveryStats;
import org.elasticsearch.node.service.NodeService;
import java.util.HashSet;
import java.util.Queue;
@@ -84,11 +82,6 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
this.discoverySettings = new DiscoverySettings(settings, clusterSettings);
}
@Override
public void setNodeService(@Nullable NodeService nodeService) {
// nothing to do here
}
@Override
public void setRoutingService(RoutingService routingService) {
this.routingService = routingService;

View File

@@ -20,8 +20,6 @@
package org.elasticsearch.discovery.zen;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.node.service.NodeService;
/**
*
@@ -30,6 +28,4 @@ public interface DiscoveryNodesProvider {
DiscoveryNodes nodes();
@Nullable
NodeService nodeService();
}

View File

@@ -60,7 +60,6 @@ import org.elasticsearch.discovery.zen.ping.ZenPing;
import org.elasticsearch.discovery.zen.ping.ZenPingService;
import org.elasticsearch.discovery.zen.publish.PendingClusterStateStats;
import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction;
import org.elasticsearch.node.service.NodeService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.EmptyTransportResponseHandler;
import org.elasticsearch.transport.TransportChannel;
@@ -137,10 +136,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
/** counts the times this node has joined the cluster or elected itself as master */
private final AtomicLong clusterJoinsCounter = new AtomicLong();
@Nullable
private NodeService nodeService;
// must initialized in doStart(), when we have the routingService set
private volatile NodeJoinController nodeJoinController;
@@ -192,11 +187,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
transportService.registerRequestHandler(DISCOVERY_REJOIN_ACTION_NAME, RejoinClusterRequest::new, ThreadPool.Names.SAME, new RejoinClusterRequestHandler());
}
@Override
public void setNodeService(@Nullable NodeService nodeService) {
this.nodeService = nodeService;
}
@Override
public void setRoutingService(RoutingService routingService) {
this.routingService = routingService;
@@ -292,11 +282,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
return clusterService.state().nodes();
}
@Override
public NodeService nodeService() {
return this.nodeService;
}
@Override
public boolean nodeHasJoinedClusterOnce() {
return clusterJoinsCounter.get() > 0;

View File

@@ -104,6 +104,8 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
&& matchingNodes.isNodeMatchBySyncID(nodeWithHighestMatch) == true) {
// we found a better match that has a full sync id match, the existing allocation is not fully synced
// so we found a better one, cancel this one
logger.debug("cancelling allocation of replica on [{}], sync id match found on node [{}]",
currentNode, nodeWithHighestMatch);
it.moveToUnassigned(new UnassignedInfo(UnassignedInfo.Reason.REALLOCATED_REPLICA,
"existing allocation of replica to [" + currentNode + "] cancelled, sync id match found on node [" + nodeWithHighestMatch + "]",
null, allocation.getCurrentNanoTime(), System.currentTimeMillis()));

View File

@@ -19,6 +19,7 @@
package org.elasticsearch.index;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
@@ -50,7 +51,14 @@ public class Index implements Writeable<Index> {
@Override
public String toString() {
return "[" + name + "]";
/*
* If we have a uuid we put it in the toString so it'll show up in logs which is useful as more and more things use the uuid rather
* than the name as the lookup key for the index.
*/
if (ClusterState.UNKNOWN_UUID.equals(uuid)) {
return "[" + name + "]";
}
return "[" + name + "/" + uuid + "]";
}
@Override
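An illustration of the new output, assuming the two-argument (name, uuid) constructor; the uuid value is made up and ClusterState.UNKNOWN_UUID is the placeholder for indices whose uuid is not known:

    new Index("twitter", ClusterState.UNKNOWN_UUID).toString();  // "[twitter]"
    new Index("twitter", "fDJzkhkHRUuJCdBAnHRbzw").toString();   // "[twitter/fDJzkhkHRUuJCdBAnHRbzw]"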

View File

@@ -85,9 +85,9 @@ public final class KeywordFieldMapper extends FieldMapper implements AllFieldMap
@Override
public Builder indexOptions(IndexOptions indexOptions) {
if (fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) > 0) {
if (indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS) > 0) {
throw new IllegalArgumentException("The [keyword] field does not support positions, got [index_options]="
+ indexOptionToString(fieldType.indexOptions()));
+ indexOptionToString(indexOptions));
}
return super.indexOptions(indexOptions);
}
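The one-line fix above makes the guard test the incoming indexOptions argument instead of the field's current options, so positions are now rejected as intended. A hedged sketch (the builder construction is illustrative):

    new KeywordFieldMapper.Builder("tag")
        .indexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
    // now throws IllegalArgumentException: The [keyword] field does not support positions, ...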

View File

@@ -28,6 +28,8 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Iterators;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.geo.GeoUtils;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
@@ -56,6 +58,7 @@ import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField;
*/
public abstract class BaseGeoPointFieldMapper extends FieldMapper implements ArrayValueMapperParser {
public static final String CONTENT_TYPE = "geo_point";
protected static final DeprecationLogger deprecationLogger = new DeprecationLogger(Loggers.getLogger(BaseGeoPointFieldMapper.class));
public static class Names {
public static final String LAT = "lat";
public static final String LAT_SUFFIX = "." + LAT;
@@ -194,9 +197,13 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
String propName = Strings.toUnderscoreCase(entry.getKey());
Object propNode = entry.getValue();
if (propName.equals("lat_lon")) {
deprecationLogger.deprecated(CONTENT_TYPE + " lat_lon parameter is deprecated and will be removed "
+ "in the next major release");
builder.enableLatLon(XContentMapValues.lenientNodeBooleanValue(propNode));
iterator.remove();
} else if (propName.equals("precision_step")) {
deprecationLogger.deprecated(CONTENT_TYPE + " precision_step parameter is deprecated and will be removed "
+ "in the next major release");
builder.precisionStep(XContentMapValues.nodeIntegerValue(propNode));
iterator.remove();
} else if (propName.equals("geohash")) {

View File

@@ -21,6 +21,7 @@ package org.elasticsearch.index.similarity;
import org.apache.lucene.search.similarities.PerFieldSimilarityWrapper;
import org.apache.lucene.search.similarities.Similarity;
import org.elasticsearch.Version;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.IndexModule;
@@ -63,6 +64,10 @@ public final class SimilarityService extends AbstractIndexComponent {
Map<String, Settings> similaritySettings = this.indexSettings.getSettings().getGroups(IndexModule.SIMILARITY_SETTINGS_PREFIX);
for (Map.Entry<String, Settings> entry : similaritySettings.entrySet()) {
String name = entry.getKey();
// Starting with v5.0 indices, it should no longer be possible to redefine built-in similarities
if(BUILT_IN.containsKey(name) && indexSettings.getIndexVersionCreated().onOrAfter(Version.V_5_0_0)) {
throw new IllegalArgumentException("Cannot redefine built-in Similarity [" + name + "]");
}
Settings settings = entry.getValue();
String typeName = settings.get("type");
if (typeName == null) {
@@ -76,9 +81,16 @@
}
providers.put(name, factory.apply(name, settings));
}
addSimilarities(similaritySettings, providers, DEFAULTS);
for (Map.Entry<String, SimilarityProvider> entry : addSimilarities(similaritySettings, DEFAULTS).entrySet()) {
// Avoid overwriting custom providers for indices older than v5.0
if (providers.containsKey(entry.getKey()) && indexSettings.getIndexVersionCreated().before(Version.V_5_0_0)) {
continue;
}
providers.put(entry.getKey(), entry.getValue());
}
this.similarities = providers;
defaultSimilarity = providers.get(SimilarityService.DEFAULT_SIMILARITY).get();
defaultSimilarity = (providers.get("default") != null) ? providers.get("default").get()
: providers.get(SimilarityService.DEFAULT_SIMILARITY).get();
// Expert users can configure the base type as being different to default, but out-of-box we use default.
baseSimilarity = (providers.get("base") != null) ? providers.get("base").get() :
defaultSimilarity;
@@ -90,7 +102,9 @@
defaultSimilarity;
}
private void addSimilarities(Map<String, Settings> similaritySettings, Map<String, SimilarityProvider> providers, Map<String, BiFunction<String, Settings, SimilarityProvider>> similarities) {
private Map<String, SimilarityProvider> addSimilarities(Map<String, Settings> similaritySettings,
Map<String, BiFunction<String, Settings, SimilarityProvider>> similarities) {
Map<String, SimilarityProvider> providers = new HashMap<>(similarities.size());
for (Map.Entry<String, BiFunction<String, Settings, SimilarityProvider>> entry : similarities.entrySet()) {
String name = entry.getKey();
BiFunction<String, Settings, SimilarityProvider> factory = entry.getValue();
@@ -100,12 +114,17 @@
}
providers.put(name, factory.apply(name, settings));
}
return providers;
}
public SimilarityProvider getSimilarity(String name) {
return similarities.get(name);
}
public SimilarityProvider getDefaultSimilarity() {
return similarities.get("default");
}
static class PerFieldSimilarity extends PerFieldSimilarityWrapper {
private final Similarity defaultSimilarity;

View File

@@ -84,7 +84,6 @@ public class NodeService extends AbstractComponent implements Closeable {
this.transportService = transportService;
this.indicesService = indicesService;
this.discovery = discovery;
discovery.setNodeService(this);
this.version = version;
this.pluginService = pluginService;
this.circuitBreakerService = circuitBreakerService;

View File

@@ -52,10 +52,10 @@ public class RestCancelTasksAction extends BaseRestHandler {
TaskId parentTaskId = new TaskId(request.param("parent_task_id"));
CancelTasksRequest cancelTasksRequest = new CancelTasksRequest();
cancelTasksRequest.taskId(taskId);
cancelTasksRequest.nodesIds(nodesIds);
cancelTasksRequest.actions(actions);
cancelTasksRequest.parentTaskId(parentTaskId);
cancelTasksRequest.setTaskId(taskId);
cancelTasksRequest.setNodesIds(nodesIds);
cancelTasksRequest.setActions(actions);
cancelTasksRequest.setParentTaskId(parentTaskId);
client.admin().cluster().cancelTasks(cancelTasksRequest, new RestToXContentListener<>(channel));
}
}

View File

@@ -50,13 +50,15 @@ public class RestListTasksAction extends BaseRestHandler {
TaskId taskId = new TaskId(request.param("taskId"));
String[] actions = Strings.splitStringByCommaToArray(request.param("actions"));
TaskId parentTaskId = new TaskId(request.param("parent_task_id"));
boolean waitForCompletion = request.paramAsBoolean("wait_for_completion", false);
ListTasksRequest listTasksRequest = new ListTasksRequest();
listTasksRequest.taskId(taskId);
listTasksRequest.nodesIds(nodesIds);
listTasksRequest.detailed(detailed);
listTasksRequest.actions(actions);
listTasksRequest.parentTaskId(parentTaskId);
listTasksRequest.setTaskId(taskId);
listTasksRequest.setNodesIds(nodesIds);
listTasksRequest.setDetailed(detailed);
listTasksRequest.setActions(actions);
listTasksRequest.setParentTaskId(parentTaskId);
listTasksRequest.setWaitForCompletion(waitForCompletion);
client.admin().cluster().listTasks(listTasksRequest, new RestToXContentListener<>(channel));
}
}

View File

@@ -144,8 +144,12 @@ public class RestAnalyzeAction extends BaseRestHandler {
charFilters.add(parser.text());
}
analyzeRequest.charFilters(charFilters.toArray(new String[charFilters.size()]));
} else if (parseFieldMatcher.match(currentFieldName, Fields.EXPLAIN) && token == XContentParser.Token.VALUE_BOOLEAN) {
analyzeRequest.explain(parser.booleanValue());
} else if (parseFieldMatcher.match(currentFieldName, Fields.EXPLAIN)) {
if (parser.isBooleanValue()) {
analyzeRequest.explain(parser.booleanValue());
} else {
throw new IllegalArgumentException(currentFieldName + " must be either 'true' or 'false'");
}
} else if (parseFieldMatcher.match(currentFieldName, Fields.ATTRIBUTES) && token == XContentParser.Token.START_ARRAY){
List<String> attributes = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {

View File

@@ -35,8 +35,6 @@ public class FieldSortBuilder extends SortBuilder {
private Object missing;
private Boolean ignoreUnmapped;
private String unmappedType;
private String sortMode;
@@ -76,17 +74,6 @@ public class FieldSortBuilder extends SortBuilder {
return this;
}
/**
* Sets if the field does not exists in the index, it should be ignored and not sorted by or not. Defaults
* to <tt>false</tt> (not ignoring).
* @deprecated Use {@link #unmappedType(String)} instead.
*/
@Deprecated
public FieldSortBuilder ignoreUnmapped(boolean ignoreUnmapped) {
this.ignoreUnmapped = ignoreUnmapped;
return this;
}
/**
* Set the type to use in case the current field is not mapped in an index.
* Specifying a type tells Elasticsearch what type the sort values should have, which is important
@@ -138,9 +125,6 @@
if (missing != null) {
builder.field("missing", missing);
}
if (ignoreUnmapped != null) {
builder.field(SortParseElement.IGNORE_UNMAPPED.getPreferredName(), ignoreUnmapped);
}
if (unmappedType != null) {
builder.field(SortParseElement.UNMAPPED_TYPE.getPreferredName(), unmappedType);
}

View File

@@ -30,7 +30,6 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.core.LongFieldMapper;
import org.elasticsearch.index.query.support.NestedInnerQueryParseSupport;
import org.elasticsearch.search.MultiValueMode;
import org.elasticsearch.search.SearchParseElement;
@@ -55,7 +54,6 @@ public class SortParseElement implements SearchParseElement {
private static final SortField SORT_DOC = new SortField(null, SortField.Type.DOC);
private static final SortField SORT_DOC_REVERSE = new SortField(null, SortField.Type.DOC, true);
public static final ParseField IGNORE_UNMAPPED = new ParseField("ignore_unmapped");
public static final ParseField UNMAPPED_TYPE = new ParseField("unmapped_type");
public static final String SCORE_FIELD_NAME = "_score";
@@ -156,12 +154,6 @@
}
} else if ("missing".equals(innerJsonName)) {
missing = parser.textOrNull();
} else if (context.parseFieldMatcher().match(innerJsonName, IGNORE_UNMAPPED)) {
// backward compatibility: ignore_unmapped has been replaced with unmapped_type
if (unmappedType == null // don't override if unmapped_type has been provided too
&& parser.booleanValue()) {
unmappedType = LongFieldMapper.CONTENT_TYPE;
}
} else if (context.parseFieldMatcher().match(innerJsonName, UNMAPPED_TYPE)) {
unmappedType = parser.textOrNull();
} else if ("mode".equals(innerJsonName)) {

View File

@@ -222,7 +222,7 @@ public class ThreadPool extends AbstractComponent implements Closeable {
int halfProcMaxAt5 = Math.min(((availableProcessors + 1) / 2), 5);
int halfProcMaxAt10 = Math.min(((availableProcessors + 1) / 2), 10);
Map<String, Settings> defaultExecutorTypeSettings = new HashMap<>();
add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.GENERIC).keepAlive("30s"));
add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.GENERIC).size(4 * availableProcessors).keepAlive("30s"));
add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.INDEX).size(availableProcessors).queueSize(200));
add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.BULK).size(availableProcessors).queueSize(50));
add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.GET).size(availableProcessors).queueSize(1000));
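A worked illustration of the sizing arithmetic above for availableProcessors = 8 (not from the commit):

    int halfProcMaxAt5  = Math.min((8 + 1) / 2, 5);   // = 4
    int halfProcMaxAt10 = Math.min((8 + 1) / 2, 10);  // = 4
    int genericSize     = 4 * 8;                      // = 32, the new bound on the generic pool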

View File

@@ -237,8 +237,8 @@ public class CancellableTasksTests extends TaskManagerTestCase {
// Cancel main task
CancelTasksRequest request = new CancelTasksRequest();
request.reason("Testing Cancellation");
request.taskId(new TaskId(testNodes[0].discoveryNode.getId(), mainTask.getId()));
request.setReason("Testing Cancellation");
request.setTaskId(new TaskId(testNodes[0].discoveryNode.getId(), mainTask.getId()));
// And send the cancellation request to a random node
CancelTasksResponse response = testNodes[randomIntBetween(0, testNodes.length - 1)].transportCancelTasksAction.execute(request)
.get();
@@ -270,7 +270,7 @@ public class CancellableTasksTests extends TaskManagerTestCase {
// Make sure that tasks are no longer running
ListTasksResponse listTasksResponse = testNodes[randomIntBetween(0, testNodes.length - 1)]
.transportListTasksAction.execute(new ListTasksRequest().taskId(
.transportListTasksAction.execute(new ListTasksRequest().setTaskId(
new TaskId(testNodes[0].discoveryNode.getId(), mainTask.getId()))).get();
assertEquals(0, listTasksResponse.getTasks().size());
@@ -313,7 +313,7 @@ public class CancellableTasksTests extends TaskManagerTestCase {
// Make sure that tasks are running
ListTasksResponse listTasksResponse = testNodes[randomIntBetween(0, testNodes.length - 1)]
.transportListTasksAction.execute(new ListTasksRequest().parentTaskId(new TaskId(mainNode, mainTask.getId()))).get();
.transportListTasksAction.execute(new ListTasksRequest().setParentTaskId(new TaskId(mainNode, mainTask.getId()))).get();
assertThat(listTasksResponse.getTasks().size(), greaterThanOrEqualTo(blockOnNodes.size()));
// Simulate the coordinating node leaving the cluster
@@ -331,8 +331,8 @@ public class CancellableTasksTests extends TaskManagerTestCase {
logger.info("--> Simulate issuing cancel request on the node that is about to leave the cluster");
// Simulate issuing cancel request on the node that is about to leave the cluster
CancelTasksRequest request = new CancelTasksRequest();
request.reason("Testing Cancellation");
request.taskId(new TaskId(testNodes[0].discoveryNode.getId(), mainTask.getId()));
request.setReason("Testing Cancellation");
request.setTaskId(new TaskId(testNodes[0].discoveryNode.getId(), mainTask.getId()));
// And send the cancellation request to a random node
CancelTasksResponse response = testNodes[0].transportCancelTasksAction.execute(request).get();
logger.info("--> Done simulating issuing cancel request on the node that is about to leave the cluster");
@@ -356,7 +356,7 @@ public class CancellableTasksTests extends TaskManagerTestCase {
// Make sure that tasks are no longer running
try {
ListTasksResponse listTasksResponse1 = testNodes[randomIntBetween(1, testNodes.length - 1)]
.transportListTasksAction.execute(new ListTasksRequest().taskId(new TaskId(mainNode, mainTask.getId()))).get();
.transportListTasksAction.execute(new ListTasksRequest().setTaskId(new TaskId(mainNode, mainTask.getId()))).get();
assertEquals(0, listTasksResponse1.getTasks().size());
} catch (InterruptedException ex) {
Thread.currentThread().interrupt();

View File

@@ -18,6 +18,8 @@
*/
package org.elasticsearch.action.admin.cluster.node.tasks;
import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.ListenableActionFuture;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse;
@@ -40,6 +42,7 @@ import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.tasks.MockTaskManager;
import org.elasticsearch.test.tasks.MockTaskManagerListener;
import org.elasticsearch.test.transport.MockTransportService;
import org.elasticsearch.transport.ReceiveTimeoutTransportException;
import java.io.IOException;
import java.util.ArrayList;
@@ -54,8 +57,11 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Function;
import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
import static org.hamcrest.Matchers.either;
import static org.hamcrest.Matchers.emptyCollectionOf;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
import static org.hamcrest.Matchers.not;
@@ -327,6 +333,78 @@ public class TasksIT extends ESIntegTestCase {
assertEquals(0, client().admin().cluster().prepareListTasks().setActions(TestTaskPlugin.TestTaskAction.NAME + "[n]").get().getTasks().size());
}
public void testTasksListWaitForCompletion() throws Exception {
// Start blocking test task
ListenableActionFuture<TestTaskPlugin.NodesResponse> future = TestTaskPlugin.TestTaskAction.INSTANCE.newRequestBuilder(client())
.execute();
ListenableActionFuture<ListTasksResponse> waitResponseFuture;
try {
// Wait for the task to start on all nodes
assertBusy(() -> assertEquals(internalCluster().numDataAndMasterNodes(),
client().admin().cluster().prepareListTasks().setActions(TestTaskPlugin.TestTaskAction.NAME + "[n]").get().getTasks().size()));
// Spin up a request to wait for that task to finish
waitResponseFuture = client().admin().cluster().prepareListTasks()
.setActions(TestTaskPlugin.TestTaskAction.NAME + "[n]").setWaitForCompletion(true).execute();
} finally {
// Unblock the request so the wait for completion request can finish
TestTaskPlugin.UnblockTestTasksAction.INSTANCE.newRequestBuilder(client()).get();
}
// Now that the task is unblocked the list response will come back
ListTasksResponse waitResponse = waitResponseFuture.get();
// If any tasks come back then they are the tasks we asked for - it'd be super weird if this wasn't true
for (TaskInfo task: waitResponse.getTasks()) {
assertEquals(task.getAction(), TestTaskPlugin.TestTaskAction.NAME + "[n]");
}
// See the next test to cover the timeout case
future.get();
}
public void testTasksListWaitForTimeout() throws Exception {
// Start blocking test task
ListenableActionFuture<TestTaskPlugin.NodesResponse> future = TestTaskPlugin.TestTaskAction.INSTANCE.newRequestBuilder(client())
.execute();
try {
// Wait for the task to start on all nodes
assertBusy(() -> assertEquals(internalCluster().numDataAndMasterNodes(),
client().admin().cluster().prepareListTasks().setActions(TestTaskPlugin.TestTaskAction.NAME + "[n]").get().getTasks().size()));
// Spin up a request that should wait for those tasks to finish
// It will timeout because we haven't unblocked the tasks
ListTasksResponse waitResponse = client().admin().cluster().prepareListTasks()
.setActions(TestTaskPlugin.TestTaskAction.NAME + "[n]").setWaitForCompletion(true).setTimeout(timeValueMillis(100))
.get();
assertFalse(waitResponse.getNodeFailures().isEmpty());
for (FailedNodeException failure : waitResponse.getNodeFailures()) {
Throwable timeoutException = failure.getCause();
// The exception sometimes comes back wrapped depending on the client
if (timeoutException.getCause() != null) {
timeoutException = timeoutException.getCause();
}
assertThat(timeoutException,
either(instanceOf(ElasticsearchTimeoutException.class)).or(instanceOf(ReceiveTimeoutTransportException.class)));
}
} finally {
// Now we can unblock those requests
TestTaskPlugin.UnblockTestTasksAction.INSTANCE.newRequestBuilder(client()).get();
}
future.get();
}
public void testTasksListWaitForNoTask() throws Exception {
// Spin up a request to wait for no matching tasks
ListenableActionFuture<ListTasksResponse> waitResponseFuture = client().admin().cluster().prepareListTasks()
.setActions(TestTaskPlugin.TestTaskAction.NAME + "[n]").setWaitForCompletion(true).setTimeout(timeValueMillis(10))
.execute();
// It should finish quickly and without complaint
assertThat(waitResponseFuture.get().getTasks(), emptyCollectionOf(TaskInfo.class));
}
@Override
public void tearDown() throws Exception {
for (Map.Entry<Tuple<String, String>, RecordingTaskManagerListener> entry : listeners.entrySet()) {

View File

@@ -345,7 +345,10 @@ public class TestTaskPlugin extends Plugin {
public static class UnblockTestTasksRequest extends BaseTasksRequest<UnblockTestTasksRequest> {
@Override
public boolean match(Task task) {
return task instanceof TestTask && super.match(task);
}
}
public static class UnblockTestTasksResponse extends BaseTasksResponse {

View File

@@ -355,7 +355,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
int testNodeNum = randomIntBetween(0, testNodes.length - 1);
TestNode testNode = testNodes[testNodeNum];
ListTasksRequest listTasksRequest = new ListTasksRequest();
listTasksRequest.actions("testAction*"); // pick all test actions
listTasksRequest.setActions("testAction*"); // pick all test actions
logger.info("Listing currently running tasks using node [{}]", testNodeNum);
ListTasksResponse response = testNode.transportListTasksAction.execute(listTasksRequest).get();
logger.info("Checking currently running tasks");
@@ -371,7 +371,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
// Check task counts using transport with filtering
testNode = testNodes[randomIntBetween(0, testNodes.length - 1)];
listTasksRequest = new ListTasksRequest();
listTasksRequest.actions("testAction[n]"); // only pick node actions
listTasksRequest.setActions("testAction[n]"); // only pick node actions
response = testNode.transportListTasksAction.execute(listTasksRequest).get();
assertEquals(testNodes.length, response.getPerNodeTasks().size());
for (Map.Entry<DiscoveryNode, List<TaskInfo>> entry : response.getPerNodeTasks().entrySet()) {
@@ -380,7 +380,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
}
// Check task counts using transport with detailed description
listTasksRequest.detailed(true); // same request only with detailed description
listTasksRequest.setDetailed(true); // same request only with detailed description
response = testNode.transportListTasksAction.execute(listTasksRequest).get();
assertEquals(testNodes.length, response.getPerNodeTasks().size());
for (Map.Entry<DiscoveryNode, List<TaskInfo>> entry : response.getPerNodeTasks().entrySet()) {
@@ -389,7 +389,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
}
// Make sure that the main task on coordinating node is the task that was returned to us by execute()
listTasksRequest.actions("testAction"); // only pick the main task
listTasksRequest.setActions("testAction"); // only pick the main task
response = testNode.transportListTasksAction.execute(listTasksRequest).get();
assertEquals(1, response.getTasks().size());
assertEquals(mainTask.getId(), response.getTasks().get(0).getId());
@@ -417,7 +417,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
// Get the parent task
ListTasksRequest listTasksRequest = new ListTasksRequest();
listTasksRequest.actions("testAction");
listTasksRequest.setActions("testAction");
ListTasksResponse response = testNode.transportListTasksAction.execute(listTasksRequest).get();
assertEquals(1, response.getTasks().size());
String parentNode = response.getTasks().get(0).getNode().getId();
@@ -425,7 +425,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
// Find tasks with common parent
listTasksRequest = new ListTasksRequest();
listTasksRequest.parentTaskId(new TaskId(parentNode, parentTaskId));
listTasksRequest.setParentTaskId(new TaskId(parentNode, parentTaskId));
response = testNode.transportListTasksAction.execute(listTasksRequest).get();
assertEquals(testNodes.length, response.getTasks().size());
for (TaskInfo task : response.getTasks()) {
@@ -451,7 +451,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
// Get the parent task
ListTasksRequest listTasksRequest = new ListTasksRequest();
listTasksRequest.actions("testAction*");
listTasksRequest.setActions("testAction*");
ListTasksResponse response = testNode.transportListTasksAction.execute(listTasksRequest).get();
assertEquals(0, response.getTasks().size());
@@ -472,7 +472,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
// Check task counts using transport with filtering
TestNode testNode = testNodes[randomIntBetween(0, testNodes.length - 1)];
ListTasksRequest listTasksRequest = new ListTasksRequest();
listTasksRequest.actions("testAction[n]"); // only pick node actions
listTasksRequest.setActions("testAction[n]"); // only pick node actions
ListTasksResponse response = testNode.transportListTasksAction.execute(listTasksRequest).get();
assertEquals(testNodes.length, response.getPerNodeTasks().size());
for (Map.Entry<DiscoveryNode, List<TaskInfo>> entry : response.getPerNodeTasks().entrySet()) {
@@ -482,7 +482,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
// Check task counts using transport with detailed description
long minimalDurationNanos = System.nanoTime() - maximumStartTimeNanos;
listTasksRequest.detailed(true); // same request only with detailed description
listTasksRequest.setDetailed(true); // same request only with detailed description
response = testNode.transportListTasksAction.execute(listTasksRequest).get();
assertEquals(testNodes.length, response.getPerNodeTasks().size());
for (Map.Entry<DiscoveryNode, List<TaskInfo>> entry : response.getPerNodeTasks().entrySet()) {
@@ -518,9 +518,9 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
// Try to cancel main task using action name
CancelTasksRequest request = new CancelTasksRequest();
request.nodesIds(testNodes[0].discoveryNode.getId());
request.reason("Testing Cancellation");
request.actions(actionName);
request.setNodesIds(testNodes[0].discoveryNode.getId());
request.setReason("Testing Cancellation");
request.setActions(actionName);
CancelTasksResponse response = testNodes[randomIntBetween(0, testNodes.length - 1)].transportCancelTasksAction.execute(request)
.get();
@@ -532,8 +532,8 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
// Try to cancel main task using id
request = new CancelTasksRequest();
request.reason("Testing Cancellation");
request.taskId(new TaskId(testNodes[0].discoveryNode.getId(), task.getId()));
request.setReason("Testing Cancellation");
request.setTaskId(new TaskId(testNodes[0].discoveryNode.getId(), task.getId()));
response = testNodes[randomIntBetween(0, testNodes.length - 1)].transportCancelTasksAction.execute(request).get();
// Shouldn't match any tasks since testAction doesn't support cancellation
@@ -544,7 +544,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
// Make sure that task is still running
ListTasksRequest listTasksRequest = new ListTasksRequest();
listTasksRequest.actions(actionName);
listTasksRequest.setActions(actionName);
ListTasksResponse listResponse = testNodes[randomIntBetween(0, testNodes.length - 1)].transportListTasksAction.execute
(listTasksRequest).get();
assertEquals(1, listResponse.getPerNodeTasks().size());
@@ -617,7 +617,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
// Run task action on node tasks that are currently running
// should be successful on all nodes except one
TestTasksRequest testTasksRequest = new TestTasksRequest();
testTasksRequest.actions("testAction[n]"); // pick all test actions
testTasksRequest.setActions("testAction[n]"); // pick all test actions
TestTasksResponse response = tasksActions[0].execute(testTasksRequest).get();
// Get successful responses from all nodes except one
assertEquals(testNodes.length - 1, response.tasks.size());
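These hunks are mechanical renames to the get/set naming convention on the task request classes. A minimal sketch of the resulting call pattern, where nodeId and taskId are hypothetical stand-ins for values the tests obtain elsewhere:
// List node-level actions with detailed descriptions, using the renamed setters.
ListTasksRequest list = new ListTasksRequest();
list.setActions("testAction[n]");
list.setDetailed(true);
// Cancel a specific task on a specific node, with a human-readable reason.
CancelTasksRequest cancel = new CancelTasksRequest();
cancel.setNodesIds(nodeId);                   // nodeId: hypothetical node id string
cancel.setReason("Testing Cancellation");
cancel.setTaskId(new TaskId(nodeId, taskId)); // taskId: hypothetical task id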

View File

@@ -131,7 +131,7 @@ public class BootstrapCheckTests extends ESTestCase {
}
public void testMaxNumberOfThreadsCheck() {
final int limit = 1 << 15;
final int limit = 1 << 11;
final AtomicLong maxNumberOfThreads = new AtomicLong(randomIntBetween(1, limit - 1));
final BootstrapCheck.MaxNumberOfThreadsCheck check = new BootstrapCheck.MaxNumberOfThreadsCheck() {
@Override

View File

@@ -20,8 +20,7 @@ package org.elasticsearch.common;
import org.elasticsearch.test.ESTestCase;
import java.util.EnumSet;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.not;
import static org.hamcrest.CoreMatchers.sameInstance;
@@ -33,38 +32,29 @@ public class ParseFieldTests extends ESTestCase {
String[] deprecated = new String[]{"barFoo", "bar_foo"};
ParseField withDeprecations = field.withDeprecation("Foobar", randomFrom(deprecated));
assertThat(field, not(sameInstance(withDeprecations)));
assertThat(field.match(randomFrom(values), ParseField.EMPTY_FLAGS), is(true));
assertThat(field.match("foo bar", ParseField.EMPTY_FLAGS), is(false));
assertThat(field.match(randomFrom(deprecated), ParseField.EMPTY_FLAGS), is(false));
assertThat(field.match("barFoo", ParseField.EMPTY_FLAGS), is(false));
assertThat(field.match(randomFrom(values), false), is(true));
assertThat(field.match("foo bar", false), is(false));
assertThat(field.match(randomFrom(deprecated), false), is(false));
assertThat(field.match("barFoo", false), is(false));
assertThat(withDeprecations.match(randomFrom(values), ParseField.EMPTY_FLAGS), is(true));
assertThat(withDeprecations.match("foo bar", ParseField.EMPTY_FLAGS), is(false));
assertThat(withDeprecations.match(randomFrom(deprecated), ParseField.EMPTY_FLAGS), is(true));
assertThat(withDeprecations.match("barFoo", ParseField.EMPTY_FLAGS), is(true));
assertThat(withDeprecations.match(randomFrom(values), false), is(true));
assertThat(withDeprecations.match("foo bar", false), is(false));
assertThat(withDeprecations.match(randomFrom(deprecated), false), is(true));
assertThat(withDeprecations.match("barFoo", false), is(true));
// now with strict mode
EnumSet<ParseField.Flag> flags = EnumSet.of(ParseField.Flag.STRICT);
assertThat(field.match(randomFrom(values), flags), is(true));
assertThat(field.match("foo bar", flags), is(false));
assertThat(field.match(randomFrom(deprecated), flags), is(false));
assertThat(field.match("barFoo", flags), is(false));
assertThat(field.match(randomFrom(values), true), is(true));
assertThat(field.match("foo bar", true), is(false));
assertThat(field.match(randomFrom(deprecated), true), is(false));
assertThat(field.match("barFoo", true), is(false));
assertThat(withDeprecations.match(randomFrom(values), flags), is(true));
assertThat(withDeprecations.match("foo bar", flags), is(false));
try {
withDeprecations.match(randomFrom(deprecated), flags);
fail();
} catch (IllegalArgumentException ex) {
}
try {
withDeprecations.match("barFoo", flags);
fail();
} catch (IllegalArgumentException ex) {
}
assertThat(withDeprecations.match(randomFrom(values), true), is(true));
assertThat(withDeprecations.match("foo bar", true), is(false));
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
() -> withDeprecations.match(randomFrom(deprecated), true));
assertThat(e.getMessage(), containsString("used, expected [foo_bar] instead"));
e = expectThrows(IllegalArgumentException.class, () -> withDeprecations.match("barFoo", true));
assertThat(e.getMessage(), containsString("Deprecated field [barFoo] used, expected [foo_bar] instead"));
}
public void testAllDeprecated() {
@@ -72,30 +62,29 @@ public class ParseFieldTests extends ESTestCase {
boolean withDeprecatedNames = randomBoolean();
String[] deprecated = new String[]{"text", "same_as_text"};
String[] allValues = values;
String[] allValues;
if (withDeprecatedNames) {
String[] newArray = new String[allValues.length + deprecated.length];
System.arraycopy(allValues, 0, newArray, 0, allValues.length);
System.arraycopy(deprecated, 0, newArray, allValues.length, deprecated.length);
String[] newArray = new String[values.length + deprecated.length];
System.arraycopy(values, 0, newArray, 0, values.length);
System.arraycopy(deprecated, 0, newArray, values.length, deprecated.length);
allValues = newArray;
} else {
allValues = values;
}
ParseField field = new ParseField(randomFrom(values));
ParseField field;
if (withDeprecatedNames) {
field = field.withDeprecation(deprecated);
field = new ParseField(randomFrom(values)).withDeprecation(deprecated).withAllDeprecated("like");
} else {
field = new ParseField(randomFrom(values)).withAllDeprecated("like");
}
field = field.withAllDeprecated("like");
// strict mode off
assertThat(field.match(randomFrom(allValues), ParseField.EMPTY_FLAGS), is(true));
assertThat(field.match("not a field name", ParseField.EMPTY_FLAGS), is(false));
assertThat(field.match(randomFrom(allValues), false), is(true));
assertThat(field.match("not a field name", false), is(false));
// now with strict mode
EnumSet<ParseField.Flag> flags = EnumSet.of(ParseField.Flag.STRICT);
try {
field.match(randomFrom(allValues), flags);
fail();
} catch (IllegalArgumentException ex) {
}
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> field.match(randomFrom(allValues), true));
assertThat(e.getMessage(), containsString(" used, replaced by [like]"));
}
}
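The try/fail/catch blocks above collapse into expectThrows, which asserts that the lambda throws and hands back the exception so its message can be checked. The pattern in isolation, taken from the assertions above:
// Before: the exception is swallowed and its message never checked.
try {
    withDeprecations.match("barFoo", true);
    fail();
} catch (IllegalArgumentException ex) {
}
// After: one call that both asserts the throw and returns the exception.
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> withDeprecations.match("barFoo", true));
assertThat(e.getMessage(), containsString("Deprecated field [barFoo] used, expected [foo_bar] instead"));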

View File

@@ -216,6 +216,13 @@ public class ScopedSettingsTests extends ESTestCase {
} catch (IllegalArgumentException e) {
assertEquals("Failed to parse value [true] for setting [index.number_of_replicas]", e.getMessage());
}
try {
settings.validate("index.similarity.classic.type", Settings.builder().put("index.similarity.classic.type", "mine").build());
fail();
} catch (IllegalArgumentException e) {
assertEquals("illegal value for [index.similarity.classic] cannot redefine built-in similarity", e.getMessage());
}
}
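The same assertion could use the expectThrows helper adopted elsewhere in this commit; a sketch of the equivalent form:
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () ->
        settings.validate("index.similarity.classic.type",
                Settings.builder().put("index.similarity.classic.type", "mine").build()));
assertEquals("illegal value for [index.similarity.classic] cannot redefine built-in similarity", e.getMessage());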

View File

@@ -33,7 +33,6 @@ import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.discovery.zen.ping.PingContextProvider;
import org.elasticsearch.discovery.zen.ping.ZenPing;
import org.elasticsearch.node.service.NodeService;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@@ -82,11 +81,6 @@ public class UnicastZenPingIT extends ESTestCase {
return DiscoveryNodes.builder().put(nodeA).localNodeId("UZP_A").build();
}
@Override
public NodeService nodeService() {
return null;
}
@Override
public boolean nodeHasJoinedClusterOnce() {
return false;
@@ -101,11 +95,6 @@
return DiscoveryNodes.builder().put(nodeB).localNodeId("UZP_B").build();
}
@Override
public NodeService nodeService() {
return null;
}
@Override
public boolean nodeHasJoinedClusterOnce() {
return true;

View File

@@ -43,7 +43,6 @@ import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.zen.DiscoveryNodesProvider;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.service.NodeService;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.test.transport.MockTransportService;
@@ -134,11 +133,6 @@ public class PublishClusterStateActionTests extends ESTestCase {
return clusterState.nodes();
}
@Override
public NodeService nodeService() {
assert false;
throw new UnsupportedOperationException("Shouldn't be here");
}
}
public MockNode createMockNode(final String name) throws Exception {

View File

@@ -0,0 +1,44 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.common.Strings;
import org.elasticsearch.test.ESTestCase;
import static org.apache.lucene.util.TestUtil.randomSimpleString;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.not;
public class IndexTests extends ESTestCase {
public void testToString() {
assertEquals("[name/uuid]", new Index("name", "uuid").toString());
assertEquals("[name]", new Index("name", ClusterState.UNKNOWN_UUID).toString());
Index random = new Index(randomSimpleString(random(), 1, 100),
usually() ? Strings.randomBase64UUID(random()) : ClusterState.UNKNOWN_UUID);
assertThat(random.toString(), containsString(random.getName()));
if (ClusterState.UNKNOWN_UUID.equals(random.getUUID())) {
assertThat(random.toString(), not(containsString(random.getUUID())));
} else {
assertThat(random.toString(), containsString(random.getUUID()));
}
}
}

View File

@@ -34,6 +34,7 @@ import org.elasticsearch.test.ESSingleNodeTestCase;
import org.junit.Before;
import java.io.IOException;
import java.util.Arrays;
import static org.hamcrest.Matchers.equalTo;
@@ -200,4 +201,35 @@ public class KeywordFieldMapperTests extends ESSingleNodeTestCase {
assertEquals(1, fields.length);
assertEquals(DocValuesType.NONE, fields[0].fieldType().docValuesType());
}
public void testIndexOptions() throws IOException {
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties").startObject("field").field("type", "keyword")
.field("index_options", "freqs").endObject().endObject()
.endObject().endObject().string();
DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping));
assertEquals(mapping, mapper.mappingSource().toString());
ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
.startObject()
.field("field", "1234")
.endObject()
.bytes());
IndexableField[] fields = doc.rootDoc().getFields("field");
assertEquals(2, fields.length);
assertEquals(IndexOptions.DOCS_AND_FREQS, fields[0].fieldType().indexOptions());
for (String indexOptions : Arrays.asList("positions", "offsets")) {
final String mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties").startObject("field").field("type", "keyword")
.field("index_options", indexOptions).endObject().endObject()
.endObject().endObject().string();
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
() -> parser.parse("type", new CompressedXContent(mapping2)));
assertEquals("The [keyword] field does not support positions, got [index_options]=" + indexOptions, e.getMessage());
}
}
}

View File

@@ -0,0 +1,62 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.similarity;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.IndexSettingsModule;
import java.util.Collections;
public class SimilarityServiceTests extends ESTestCase {
// Tests #16594
public void testOverrideBuiltInSimilarity() {
Settings settings = Settings.builder().put("index.similarity.BM25.type", "classic").build();
IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", settings);
try {
new SimilarityService(indexSettings, Collections.emptyMap());
fail("can't override bm25");
} catch (IllegalArgumentException ex) {
assertEquals(ex.getMessage(), "Cannot redefine built-in Similarity [BM25]");
}
}
// Pre v3 indices could override built-in similarities
public void testOverrideBuiltInSimilarityPreV3() {
Settings settings = Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0)
.put("index.similarity.BM25.type", "classic")
.build();
IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", settings);
SimilarityService service = new SimilarityService(indexSettings, Collections.emptyMap());
assertTrue(service.getSimilarity("BM25") instanceof ClassicSimilarityProvider);
}
// Tests #16594
public void testDefaultSimilarity() {
Settings settings = Settings.builder().put("index.similarity.default.type", "BM25").build();
IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", settings);
SimilarityService service = new SimilarityService(indexSettings, Collections.emptyMap());
assertTrue(service.getDefaultSimilarity() instanceof BM25SimilarityProvider);
}
}
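Since built-in names such as BM25 and classic are now reserved on current indices, custom configurations need their own key under index.similarity. A minimal sketch, assuming my_sim is a hypothetical custom name:
// Registering a classic similarity under a non-reserved name is still allowed.
Settings settings = Settings.builder()
        .put("index.similarity.my_sim.type", "classic") // my_sim: hypothetical name
        .build();
IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", settings);
SimilarityService service = new SimilarityService(indexSettings, Collections.emptyMap());
assertTrue(service.getSimilarity("my_sim") instanceof ClassicSimilarityProvider);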

View File

@@ -19,15 +19,8 @@
package org.elasticsearch.indices.analyze;
import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest;
import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequestBuilder;
import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.rest.action.admin.indices.analyze.RestAnalyzeAction;
import org.elasticsearch.test.ESIntegTestCase;
import org.hamcrest.core.IsNull;
@@ -196,53 +189,6 @@ public class AnalyzeActionIT extends ESIntegTestCase {
return randomBoolean() ? "test" : "alias";
}
public void testParseXContentForAnalyzeReuqest() throws Exception {
BytesReference content = XContentFactory.jsonBuilder()
.startObject()
.field("text", "THIS IS A TEST")
.field("tokenizer", "keyword")
.array("filters", "lowercase")
.endObject().bytes();
AnalyzeRequest analyzeRequest = new AnalyzeRequest("for test");
RestAnalyzeAction.buildFromContent(content, analyzeRequest, new ParseFieldMatcher(Settings.EMPTY));
assertThat(analyzeRequest.text().length, equalTo(1));
assertThat(analyzeRequest.text(), equalTo(new String[]{"THIS IS A TEST"}));
assertThat(analyzeRequest.tokenizer(), equalTo("keyword"));
assertThat(analyzeRequest.tokenFilters(), equalTo(new String[]{"lowercase"}));
}
public void testParseXContentForAnalyzeRequestWithInvalidJsonThrowsException() throws Exception {
AnalyzeRequest analyzeRequest = new AnalyzeRequest("for test");
try {
RestAnalyzeAction.buildFromContent(new BytesArray("{invalid_json}"), analyzeRequest, new ParseFieldMatcher(Settings.EMPTY));
fail("shouldn't get here");
} catch (Exception e) {
assertThat(e, instanceOf(IllegalArgumentException.class));
assertThat(e.getMessage(), equalTo("Failed to parse request body"));
}
}
public void testParseXContentForAnalyzeRequestWithUnknownParamThrowsException() throws Exception {
AnalyzeRequest analyzeRequest = new AnalyzeRequest("for test");
BytesReference invalidContent =XContentFactory.jsonBuilder()
.startObject()
.field("text", "THIS IS A TEST")
.field("unknown", "keyword")
.endObject().bytes();
try {
RestAnalyzeAction.buildFromContent(invalidContent, analyzeRequest, new ParseFieldMatcher(Settings.EMPTY));
fail("shouldn't get here");
} catch (Exception e) {
assertThat(e, instanceOf(IllegalArgumentException.class));
assertThat(e.getMessage(), startsWith("Unknown parameter [unknown]"));
}
}
public void testAnalyzerWithMultiValues() throws Exception {
assertAcked(prepareCreate("test").addAlias(new Alias("alias")));
ensureGreen();

View File

@@ -94,7 +94,7 @@ public class RecoverySourceHandlerTests extends ESTestCase {
@Override
public void close() throws IOException {
super.close();
store.directory().sync(Collections.singleton(md.name())); // sync otherwise MDW will mess with it
targetStore.directory().sync(Collections.singleton(md.name())); // sync otherwise MDW will mess with it
}
};
} catch (IOException e) {

View File

@@ -0,0 +1,98 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.rest.action.admin.indices.analyze;
import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.test.ESTestCase;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.startsWith;
public class RestAnalyzeActionTests extends ESTestCase {
public void testParseXContentForAnalyzeRequest() throws Exception {
BytesReference content = XContentFactory.jsonBuilder()
.startObject()
.field("text", "THIS IS A TEST")
.field("tokenizer", "keyword")
.array("filters", "lowercase")
.endObject().bytes();
AnalyzeRequest analyzeRequest = new AnalyzeRequest("for test");
RestAnalyzeAction.buildFromContent(content, analyzeRequest, new ParseFieldMatcher(Settings.EMPTY));
assertThat(analyzeRequest.text().length, equalTo(1));
assertThat(analyzeRequest.text(), equalTo(new String[]{"THIS IS A TEST"}));
assertThat(analyzeRequest.tokenizer(), equalTo("keyword"));
assertThat(analyzeRequest.tokenFilters(), equalTo(new String[]{"lowercase"}));
}
public void testParseXContentForAnalyzeRequestWithInvalidJsonThrowsException() throws Exception {
AnalyzeRequest analyzeRequest = new AnalyzeRequest("for test");
try {
RestAnalyzeAction.buildFromContent(new BytesArray("{invalid_json}"), analyzeRequest, new ParseFieldMatcher(Settings.EMPTY));
fail("shouldn't get here");
} catch (Exception e) {
assertThat(e, instanceOf(IllegalArgumentException.class));
assertThat(e.getMessage(), equalTo("Failed to parse request body"));
}
}
public void testParseXContentForAnalyzeRequestWithUnknownParamThrowsException() throws Exception {
AnalyzeRequest analyzeRequest = new AnalyzeRequest("for test");
BytesReference invalidContent = XContentFactory.jsonBuilder()
.startObject()
.field("text", "THIS IS A TEST")
.field("unknown", "keyword")
.endObject().bytes();
try {
RestAnalyzeAction.buildFromContent(invalidContent, analyzeRequest, new ParseFieldMatcher(Settings.EMPTY));
fail("shouldn't get here");
} catch (Exception e) {
assertThat(e, instanceOf(IllegalArgumentException.class));
assertThat(e.getMessage(), startsWith("Unknown parameter [unknown]"));
}
}
public void testParseXContentForAnalyzeRequestWithInvalidStringExplainParamThrowsException() throws Exception {
AnalyzeRequest analyzeRequest = new AnalyzeRequest("for test");
BytesReference invalidExplain = XContentFactory.jsonBuilder()
.startObject()
.field("explain", "fals")
.endObject().bytes();
try {
RestAnalyzeAction.buildFromContent(invalidExplain, analyzeRequest, new ParseFieldMatcher(Settings.EMPTY));
fail("shouldn't get here");
} catch (Exception e) {
assertThat(e, instanceOf(IllegalArgumentException.class));
assertThat(e.getMessage(), startsWith("explain must be either 'true' or 'false'"));
}
}
}

View File

@@ -137,6 +137,32 @@ public abstract class AbstractSnapshotIntegTestCase extends ESIntegTestCase {
return null;
}
public static void blockAllDataNodes(String repository) {
for(RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) {
((MockRepository)repositoriesService.repository(repository)).blockOnDataFiles(true);
}
}
public static void unblockAllDataNodes(String repository) {
for(RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) {
((MockRepository)repositoriesService.repository(repository)).unblock();
}
}
public void waitForBlockOnAnyDataNode(String repository, TimeValue timeout) throws InterruptedException {
if (false == awaitBusy(() -> {
for(RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) {
MockRepository mockRepository = (MockRepository) repositoriesService.repository(repository);
if (mockRepository.blocked()) {
return true;
}
}
return false;
}, timeout.millis(), TimeUnit.MILLISECONDS)) {
fail("Timeout waiting for repository block on any data node!!!");
}
}
public static void unblockNode(String node) {
((MockRepository)internalCluster().getInstance(RepositoriesService.class, node).repository("test-repo")).unblock();
}

View File

@@ -1865,6 +1865,66 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
}
}
public void testCloseIndexDuringRestore() throws Exception {
Client client = client();
logger.info("--> creating repository");
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
.setType("mock").setSettings(Settings.settingsBuilder()
.put("location", randomRepoPath())
.put("compress", randomBoolean())
.put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)
));
createIndex("test-idx-1", "test-idx-2");
ensureGreen();
logger.info("--> indexing some data");
for (int i = 0; i < 100; i++) {
index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i);
index("test-idx-2", "doc", Integer.toString(i), "foo", "baz" + i);
}
refresh();
assertThat(client.prepareSearch("test-idx-1").setSize(0).get().getHits().totalHits(), equalTo(100L));
assertThat(client.prepareSearch("test-idx-2").setSize(0).get().getHits().totalHits(), equalTo(100L));
logger.info("--> snapshot");
assertThat(client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap")
.setIndices("test-idx-*").setWaitForCompletion(true).get().getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS));
logger.info("--> deleting indices before restoring");
assertAcked(client.admin().indices().prepareDelete("test-idx-*").get());
blockAllDataNodes("test-repo");
logger.info("--> execution will be blocked on all data nodes");
logger.info("--> start restore");
ListenableActionFuture<RestoreSnapshotResponse> restoreFut = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap")
.setWaitForCompletion(true)
.execute();
logger.info("--> waiting for block to kick in");
waitForBlockOnAnyDataNode("test-repo", TimeValue.timeValueSeconds(60));
logger.info("--> close index while restore is running");
try {
client.admin().indices().prepareClose("test-idx-1").get();
fail("Expected closing index to fail during restore");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), containsString("Cannot close indices that are being restored: [test-idx-1]"));
}
logger.info("--> unblocking all data nodes");
unblockAllDataNodes("test-repo");
logger.info("--> wait for restore to finish");
RestoreSnapshotResponse restoreSnapshotResponse = restoreFut.get();
logger.info("--> check that all shards were recovered");
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), greaterThan(0));
assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0));
}
public void testDeleteOrphanSnapshot() throws Exception {
Client client = client();

View File

@@ -21,13 +21,11 @@ package org.elasticsearch.test;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.component.Lifecycle;
import org.elasticsearch.common.component.LifecycleListener;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.DiscoveryStats;
import org.elasticsearch.node.service.NodeService;
public class NoopDiscovery implements Discovery {
@@ -42,11 +40,6 @@ public class NoopDiscovery implements Discovery {
return null;
}
@Override
public void setNodeService(@Nullable NodeService nodeService) {
}
@Override
public void setRoutingService(RoutingService routingService) {

View File

@@ -43,6 +43,13 @@ using the <<cluster-nodes-info>> API, with:
curl localhost:9200/_nodes/stats/process?pretty
--------------------------------------------------
[float]
[[max-number-of-threads]]
==== Number of threads
Make sure that the number of threads that the Elasticsearch user can
create is at least 2048.
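This floor matches the limit enforced by the bootstrap check exercised in BootstrapCheckTests above (1 << 11 = 2048). A rough sketch of the check's logic, with getMaxNumberOfThreads() as a hypothetical accessor for the OS-reported limit:
// Sketch only; the real logic lives in BootstrapCheck.MaxNumberOfThreadsCheck.
long maxNumberOfThreads = getMaxNumberOfThreads(); // hypothetical accessor, -1 when unknown
if (maxNumberOfThreads != -1 && maxNumberOfThreads < (1 << 11)) {
    throw new IllegalStateException("max number of threads [" + maxNumberOfThreads
            + "] for the Elasticsearch user is too low, increase to at least [2048]");
}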
[float]
[[vm-max-map-count]]
==== Virtual memory

View File

@@ -58,9 +58,6 @@
---
"wait_for_completion=false":
- skip:
version: "0.0.0 - "
reason: breaks other tests by leaving a running reindex behind
- do:
index:
index: source
@@ -79,6 +76,7 @@
dest:
index: dest
- match: {task: '/.+:\d+/'}
- set: {task: task}
- is_false: updated
- is_false: version_conflicts
- is_false: batches
@@ -87,6 +85,11 @@
- is_false: took
- is_false: created
- do:
tasks.list:
wait_for_completion: true
task_id: $task
---
"Response format for version conflict":
- do:

View File

@@ -37,6 +37,7 @@
wait_for_completion: false
index: test
- match: {task: '/.+:\d+/'}
- set: {task: task}
- is_false: updated
- is_false: version_conflicts
- is_false: batches
@@ -45,6 +46,11 @@
- is_false: took
- is_false: created
- do:
tasks.list:
wait_for_completion: true
task_id: $task
---
"Response for version conflict":
- do:

View File

@@ -44,13 +44,13 @@
"type" : "string",
"description" : "The name of the tokenizer to use for the analysis"
},
"detail": {
"explain": {
"type" : "boolean",
"description" : "With `true`, outputs more advanced details. (default: false)"
},
"attributes": {
"type" : "list",
"description" : "A comma-separated list of token attributes to output, this parameter works only with `detail=true`"
"description" : "A comma-separated list of token attributes to output, this parameter works only with `explain=true`"
},
"format": {
"type": "enum",

View File

@@ -31,6 +31,10 @@
"parent_task": {
"type" : "number",
"description" : "Return tasks with specified parent task id. Set to -1 to return all."
},
"wait_for_completion": {
"type": "boolean",
"description": "Wait for the matching tasks to complete (default: false)"
}
}
},
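Combined with the YAML changes above (stash the returned task id with set, then call tasks.list with it), this parameter enables a start-then-wait flow. A transport-level sketch, where stashedTask is a hypothetical "$task" value such as "node1:42" and setWaitForCompletion is assumed from the new waitForCompletion field:
// 1) A request started with wait_for_completion=false returns "<nodeId>:<taskId>".
// 2) tasks.list with that id and wait_for_completion=true blocks until the task finishes.
ListTasksRequest waitRequest = new ListTasksRequest();
waitRequest.setTaskId(new TaskId(stashedTask)); // assumes the string-parsing TaskId constructor
waitRequest.setWaitForCompletion(true);         // assumed setter for the new field
ListTasksResponse done = transportListTasksAction.execute(waitRequest).get();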

View File

@@ -75,7 +75,7 @@ setup:
"Detail response with Analyzer":
- do:
indices.analyze:
body: {"text": "This is troubled", "analyzer": standard, "explain": true}
body: {"text": "This is troubled", "analyzer": standard, "explain": "true"}
- length: { detail.analyzer.tokens: 3 }
- match: { detail.analyzer.name: standard }
- match: { detail.analyzer.tokens.0.token: this }

View File

@@ -28,6 +28,10 @@ import org.elasticsearch.test.StreamsUtils;
import org.junit.After;
import org.junit.Before;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.isEmptyString;
import static org.hamcrest.Matchers.not;
public abstract class CliToolTestCase extends ESTestCase {
@Before
@@ -51,8 +55,10 @@ public abstract class CliToolTestCase extends ESTestCase {
public static void assertTerminalOutputContainsHelpFile(MockTerminal terminal, String classPath) throws IOException {
String output = terminal.getOutput();
assertFalse(output, output.isEmpty());
assertThat(output, not(isEmptyString()));
String expectedDocs = StreamsUtils.copyToStringFromClasspath(classPath);
assertTrue(output, output.contains(expectedDocs));
// convert to *nix newlines as MockTerminal used for tests also uses *nix newlines
expectedDocs = expectedDocs.replace("\r\n", "\n");
assertThat(output, containsString(expectedDocs));
}
}

View File

@@ -19,14 +19,34 @@
package org.elasticsearch.test.rest;
import com.carrotsearch.randomizedtesting.RandomizedTest;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.file.FileSystem;
import java.nio.file.FileSystems;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.rest.client.RestException;
import org.elasticsearch.test.rest.client.RestResponse;
import org.elasticsearch.test.rest.parser.RestTestParseException;
import org.elasticsearch.test.rest.parser.RestTestSuiteParser;
import org.elasticsearch.test.rest.section.DoSection;
@@ -42,24 +62,11 @@ import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import java.io.IOException;
import java.io.InputStream;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.file.FileSystem;
import java.nio.file.FileSystems;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import com.carrotsearch.randomizedtesting.RandomizedTest;
import static java.util.Collections.emptyList;
import static java.util.Collections.emptyMap;
import static java.util.Collections.sort;
/**
* Runs the clients test suite against an elasticsearch cluster.
@@ -261,7 +268,6 @@ public abstract class ESRestTestCase extends ESTestCase {
@After
public void wipeCluster() throws Exception {
// wipe indices
Map<String, String> deleteIndicesArgs = new HashMap<>();
deleteIndicesArgs.put("index", "*");
@@ -285,6 +291,30 @@ public abstract class ESRestTestCase extends ESTestCase {
adminExecutionContext.callApi("snapshot.delete_repository", deleteSnapshotsArgs, Collections.emptyList(), Collections.emptyMap());
}
/**
* Logs a message if there are still running tasks. The reasoning is that any tasks still running are state that is trying to bleed into
* other tests.
*/
@After
public void logIfThereAreRunningTasks() throws InterruptedException, IOException, RestException {
RestResponse tasks = adminExecutionContext.callApi("tasks.list", emptyMap(), emptyList(), emptyMap());
Set<String> runningTasks = runningTasks(tasks);
// Ignore the task list API - it doesn't count against us
runningTasks.remove(ListTasksAction.NAME);
runningTasks.remove(ListTasksAction.NAME + "[n]");
if (runningTasks.isEmpty()) {
return;
}
List<String> stillRunning = new ArrayList<>(runningTasks);
sort(stillRunning);
logger.info("There are still tasks running after this test that might break subsequent tests {}.", stillRunning);
/*
* This isn't a higher level log or outright failure because some of these tasks are run by the cluster in the background. If we
* could determine that some tasks are run by the user we'd fail the tests if those tasks were running and ignore any background
* tasks.
*/
}
@AfterClass
public static void close() {
if (restTestExecutionContext != null) {
@@ -365,4 +395,19 @@ public abstract class ESRestTestCase extends ESTestCase {
executableSection.execute(restTestExecutionContext);
}
}
@SuppressWarnings("unchecked")
public Set<String> runningTasks(RestResponse response) throws IOException {
Set<String> runningTasks = new HashSet<>();
Map<String, Object> nodes = (Map<String, Object>) response.evaluate("nodes");
for (Map.Entry<String, Object> node : nodes.entrySet()) {
Map<String, Object> nodeInfo = (Map<String, Object>) node.getValue();
Map<String, Object> nodeTasks = (Map<String, Object>) nodeInfo.get("tasks");
for (Map.Entry<String, Object> taskAndName : nodeTasks.entrySet()) {
Map<String, Object> task = (Map<String, Object>) taskAndName.getValue();
runningTasks.add(task.get("action").toString());
}
}
return runningTasks;
}
}

View File

@@ -114,9 +114,10 @@ public class HttpRequestBuilder {
for (String pathPart : path) {
try {
finalPath.append('/');
URI uri = new URI(null, null, null, -1, pathPart, null, null);
// We append "/" to the path part to handle parts that start with - or other invalid characters
URI uri = new URI(null, null, null, -1, "/" + pathPart, null, null);
//manually escape any slash that each part may contain
finalPath.append(uri.getRawPath().replaceAll("/", "%2F"));
finalPath.append(uri.getRawPath().substring(1).replaceAll("/", "%2F"));
} catch(URISyntaxException e) {
throw new RuntimeException("unable to build uri", e);
}
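The fix works because URI's multi-argument constructor percent-encodes illegal characters but leaves '/' alone, and a leading slash keeps parts that start with '-' (or other characters the parser rejects) valid; the helper slash is then stripped back off. A standalone sketch of the round trip (class name and sample value are illustrative):
import java.net.URI;
import java.net.URISyntaxException;

public class PathPartEscapeDemo {
    public static void main(String[] args) throws URISyntaxException {
        String pathPart = "-odd/part with spaces"; // hypothetical path part
        // The leading "/" makes the part a parseable URI path even though it starts with '-'.
        URI uri = new URI(null, null, null, -1, "/" + pathPart, null, null);
        // getRawPath() percent-encodes the spaces but keeps '/' as-is...
        String raw = uri.getRawPath();             // "/-odd/part%20with%20spaces"
        // ...so drop the helper slash and escape embedded slashes by hand.
        String escaped = raw.substring(1).replaceAll("/", "%2F");
        System.out.println(escaped);               // -odd%2Fpart%20with%20spaces
    }
}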