Merge branch 'master' into feature/rank-eval

Christoph Büscher 2016-11-16 11:57:22 +01:00
commit 6c6ddf0245
84 changed files with 1757 additions and 794 deletions

Vagrantfile

@@ -280,5 +280,16 @@ export BATS_UTILS=/project/build/bats/utils
 export BATS_TESTS=/project/build/bats/tests
 export BATS_ARCHIVES=/project/build/bats/archives
 VARS
+cat \<\<SUDOERS_VARS > /etc/sudoers.d/elasticsearch_vars
+Defaults env_keep += "ZIP"
+Defaults env_keep += "TAR"
+Defaults env_keep += "RPM"
+Defaults env_keep += "DEB"
+Defaults env_keep += "BATS"
+Defaults env_keep += "BATS_UTILS"
+Defaults env_keep += "BATS_TESTS"
+Defaults env_keep += "BATS_ARCHIVES"
+SUDOERS_VARS
+chmod 0440 /etc/sudoers.d/elasticsearch_vars
 SHELL
 end


@@ -40,7 +40,7 @@ class VagrantTestPlugin implements Plugin<Project> {
     static List<String> UPGRADE_FROM_ARCHIVES = ['rpm', 'deb']

     private static final BATS = 'bats'
-    private static final String BATS_TEST_COMMAND ="cd \$BATS_ARCHIVES && sudo -E bats --tap \$BATS_TESTS/*.$BATS"
+    private static final String BATS_TEST_COMMAND ="cd \$BATS_ARCHIVES && sudo bats --tap \$BATS_TESTS/*.$BATS"

     @Override
     void apply(Project project) {


@@ -19,7 +19,6 @@
 package org.elasticsearch;

-import org.apache.lucene.util.MathUtil;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.SuppressForbidden;
@@ -92,6 +91,8 @@ public class Version {
     public static final Version V_5_0_0_rc1 = new Version(V_5_0_0_rc1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0);
     public static final int V_5_0_0_ID = 5000099;
     public static final Version V_5_0_0 = new Version(V_5_0_0_ID, org.apache.lucene.util.Version.LUCENE_6_2_0);
+    public static final int V_5_0_1_ID = 5000199;
+    public static final Version V_5_0_1 = new Version(V_5_0_1_ID, org.apache.lucene.util.Version.LUCENE_6_2_1);
     public static final int V_6_0_0_alpha1_ID = 6000001;
     public static final Version V_6_0_0_alpha1 = new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_3_0);
     public static final Version CURRENT = V_6_0_0_alpha1;
@@ -118,6 +119,8 @@ public class Version {
         switch (id) {
             case V_6_0_0_alpha1_ID:
                 return V_6_0_0_alpha1;
+            case V_5_0_1_ID:
+                return V_5_0_1;
             case V_5_0_0_ID:
                 return V_5_0_0;
             case V_5_0_0_rc1_ID:
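
For context on the constants above: version ids pack major/minor/revision/build into a single integer, the same decomposition Version's constructor applies, so V_5_0_1_ID = 5000199 reads as 5.0.1 with build 99, the conventional marker for a released build. A standalone sketch of the decomposition (not Elasticsearch source):

// Decompose a version id the way org.elasticsearch.Version does.
public final class VersionIdDemo {
    public static void main(String[] args) {
        final int id = 5000199;                  // V_5_0_1_ID from the hunk above
        final int major = (id / 1000000) % 100;  // 5
        final int minor = (id / 10000) % 100;    // 0
        final int revision = (id / 100) % 100;   // 1
        final int build = id % 100;              // 99 marks a released (GA) build
        System.out.printf("%d.%d.%d (build %d)%n", major, minor, revision, build);
    }
}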


@@ -152,12 +152,14 @@ public class IndicesStatsResponse extends BroadcastResponse implements ToXConten
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        String level = params.param("level", "indices");
-        boolean isLevelValid = "indices".equalsIgnoreCase(level) || "shards".equalsIgnoreCase(level) || "cluster".equalsIgnoreCase(level);
+        final String level = params.param("level", "indices");
+        final boolean isLevelValid =
+            "cluster".equalsIgnoreCase(level) || "indices".equalsIgnoreCase(level) || "shards".equalsIgnoreCase(level);
         if (!isLevelValid) {
-            return builder;
+            throw new IllegalArgumentException("level parameter must be one of [cluster] or [indices] or [shards] but was [" + level + "]");
         }

         builder.startObject("_all");
         builder.startObject("primaries");


@@ -164,12 +164,6 @@ public class GetResponse extends ActionResponse implements Iterable<GetField>, T
         return getResult.toXContent(builder, params);
     }

-    public static GetResponse readGetResponse(StreamInput in) throws IOException {
-        GetResponse result = new GetResponse();
-        result.readFrom(in);
-        return result;
-    }
-
     @Override
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);


@@ -70,6 +70,7 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.IdentityHashMap;
 import java.util.Iterator;
+import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
@@ -114,7 +115,7 @@ public class ClusterService extends AbstractLifecycleComponent {
     private final Collection<ClusterStateListener> priorityClusterStateListeners = new CopyOnWriteArrayList<>();
     private final Collection<ClusterStateListener> clusterStateListeners = new CopyOnWriteArrayList<>();
     private final Collection<ClusterStateListener> lastClusterStateListeners = new CopyOnWriteArrayList<>();
-    private final Map<ClusterStateTaskExecutor, List<UpdateTask>> updateTasksPerExecutor = new HashMap<>();
+    final Map<ClusterStateTaskExecutor, LinkedHashSet<UpdateTask>> updateTasksPerExecutor = new HashMap<>();
     // TODO this is rather frequently changing I guess a Synced Set would be better here and a dedicated remove API
     private final Collection<ClusterStateListener> postAppliedListeners = new CopyOnWriteArrayList<>();
     private final Iterable<ClusterStateListener> preAppliedListeners = Iterables.concat(priorityClusterStateListeners,
@@ -454,7 +455,8 @@ public class ClusterService extends AbstractLifecycleComponent {
         ).collect(Collectors.toList());

         synchronized (updateTasksPerExecutor) {
-            List<UpdateTask> existingTasks = updateTasksPerExecutor.computeIfAbsent(executor, k -> new ArrayList<>());
+            LinkedHashSet<UpdateTask> existingTasks = updateTasksPerExecutor.computeIfAbsent(executor,
+                k -> new LinkedHashSet<>(updateTasks.size()));
             for (@SuppressWarnings("unchecked") UpdateTask<T> existing : existingTasks) {
                 if (tasksIdentity.containsKey(existing.task)) {
                     throw new IllegalStateException("task [" + executor.describeTasks(Collections.singletonList(existing.task)) +
@@ -466,12 +468,29 @@ public class ClusterService extends AbstractLifecycleComponent {
         final UpdateTask<T> firstTask = updateTasks.get(0);

-        if (config.timeout() != null) {
-            updateTasksExecutor.execute(firstTask, threadPool.scheduler(), config.timeout(), () -> threadPool.generic().execute(() -> {
+        final TimeValue timeout = config.timeout();
+        if (timeout != null) {
+            updateTasksExecutor.execute(firstTask, threadPool.scheduler(), timeout, () -> threadPool.generic().execute(() -> {
+                final ArrayList<UpdateTask<T>> toRemove = new ArrayList<>();
                 for (UpdateTask<T> task : updateTasks) {
                     if (task.processed.getAndSet(true) == false) {
-                        logger.debug("cluster state update task [{}] timed out after [{}]", source, config.timeout());
-                        task.listener.onFailure(source, new ProcessClusterEventTimeoutException(config.timeout(), source));
+                        logger.debug("cluster state update task [{}] timed out after [{}]", source, timeout);
+                        toRemove.add(task);
+                    }
+                }
+                if (toRemove.isEmpty() == false) {
+                    ClusterStateTaskExecutor<T> clusterStateTaskExecutor = toRemove.get(0).executor;
+                    synchronized (updateTasksPerExecutor) {
+                        LinkedHashSet<UpdateTask> existingTasks = updateTasksPerExecutor.get(clusterStateTaskExecutor);
+                        if (existingTasks != null) {
+                            existingTasks.removeAll(toRemove);
+                            if (existingTasks.isEmpty()) {
+                                updateTasksPerExecutor.remove(clusterStateTaskExecutor);
+                            }
+                        }
+                    }
+                    for (UpdateTask<T> task : toRemove) {
+                        task.listener.onFailure(source, new ProcessClusterEventTimeoutException(timeout, source));
                     }
                 }
             }));
@@ -567,7 +586,7 @@ public class ClusterService extends AbstractLifecycleComponent {
         final ArrayList<UpdateTask<T>> toExecute = new ArrayList<>();
         final Map<String, ArrayList<T>> processTasksBySource = new HashMap<>();
         synchronized (updateTasksPerExecutor) {
-            List<UpdateTask> pending = updateTasksPerExecutor.remove(executor);
+            LinkedHashSet<UpdateTask> pending = updateTasksPerExecutor.remove(executor);
             if (pending != null) {
                 for (UpdateTask<T> task : pending) {
                     if (task.processed.getAndSet(true) == false) {
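
The collection swap is the heart of this change: an ArrayList let timed-out tasks linger in the pending map (or required an O(n) scan to evict them), while a LinkedHashSet keeps the FIFO submission order the batching logic depends on and supports cheap removal by identity. A minimal illustration of those two properties:

// Illustrative only, not Elasticsearch source.
import java.util.Arrays;
import java.util.LinkedHashSet;

public class PendingTasksDemo {
    public static void main(String[] args) {
        LinkedHashSet<String> pending = new LinkedHashSet<>(Arrays.asList("t1", "t2", "t3", "t4"));
        pending.removeAll(Arrays.asList("t2", "t4")); // evict timed-out tasks without scanning
        System.out.println(pending);                  // [t1, t3] -- insertion order preserved
    }
}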


@@ -128,15 +128,38 @@ public abstract class Rounding implements Streamable {
         @Override
         public long round(long utcMillis) {
             long rounded = field.roundFloor(utcMillis);
-            if (timeZone.isFixed() == false && timeZone.getOffset(utcMillis) != timeZone.getOffset(rounded)) {
-                // in this case, we crossed a time zone transition. In some edge
-                // cases this will
-                // result in a value that is not a rounded value itself. We need
-                // to round again
-                // to make sure. This will have no affect in cases where
-                // 'rounded' was already a proper
-                // rounded value
-                rounded = field.roundFloor(rounded);
+            if (timeZone.isFixed() == false) {
+                // special cases for non-fixed time zones with dst transitions
+                if (timeZone.getOffset(utcMillis) != timeZone.getOffset(rounded)) {
+                    /*
+                     * the offset change indicates a dst transition. In some
+                     * edge cases this will result in a value that is not a
+                     * rounded value before the transition. We round again to
+                     * make sure we really return a rounded value. This will
+                     * have no effect in cases where we already had a valid
+                     * rounded value
+                     */
+                    rounded = field.roundFloor(rounded);
+                } else {
+                    /*
+                     * check if the current time instant is at a start of a DST
+                     * overlap by comparing the offset of the instant and the
+                     * previous millisecond. We want to detect negative offset
+                     * changes that result in an overlap
+                     */
+                    if (timeZone.getOffset(rounded) < timeZone.getOffset(rounded - 1)) {
+                        /*
+                         * we are rounding a date just after a DST overlap. if
+                         * the overlap is smaller than the time unit we are
+                         * rounding to, we want to add the overlapping part to
+                         * the following rounding interval
+                         */
+                        long previousRounded = field.roundFloor(rounded - 1);
+                        if (rounded - previousRounded < field.getDurationField().getUnitMillis()) {
+                            rounded = previousRounded;
+                        }
+                    }
+                }
             }
             assert rounded == field.roundFloor(rounded);
             return rounded;
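
To make the overlap branch concrete, here is a hedged example using Joda-Time, the date library Elasticsearch relied on at the time. At the 2016-10-30 fall-back transition in Europe/Berlin the local hour 02:00 to 03:00 repeats, so the offset one millisecond before the transition instant is larger than at the instant itself, which is exactly the negative offset change the code above detects:

import org.joda.time.DateTimeZone;

public class DstOverlapDemo {
    public static void main(String[] args) {
        DateTimeZone tz = DateTimeZone.forID("Europe/Berlin");
        // 2016-10-30T01:00:00Z: 03:00+02:00 becomes 02:00+01:00 (start of the overlap)
        long transition = 1477789200000L;
        System.out.println(tz.getOffset(transition));     // 3600000  (+01:00, after)
        System.out.println(tz.getOffset(transition - 1)); // 7200000  (+02:00, before)
    }
}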


@@ -137,6 +137,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
     IndexModule.INDEX_STORE_PRE_LOAD_SETTING,
     IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING,
     IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING,
+    IndexModule.INDEX_QUERY_CACHE_TERM_QUERIES_SETTING,
     PrimaryShardAllocator.INDEX_RECOVERY_INITIAL_SHARDS_SETTING,
     FsDirectoryService.INDEX_LOCK_FACTOR_SETTING,
     EngineConfig.INDEX_CODEC_SETTING,


@@ -19,30 +19,9 @@
 package org.elasticsearch.discovery.zen;

-import java.io.Closeable;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Queue;
-import java.util.Set;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.RejectedExecutionException;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.function.Function;
 import com.carrotsearch.hppc.cursors.ObjectCursor;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.apache.logging.log4j.util.Supplier;
-import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.ClusterName;
@@ -53,6 +32,8 @@ import org.elasticsearch.common.UUIDs;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.lease.Releasable;
+import org.elasticsearch.common.lease.Releasables;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
@@ -75,6 +56,25 @@ import org.elasticsearch.transport.TransportResponse;
 import org.elasticsearch.transport.TransportResponseHandler;
 import org.elasticsearch.transport.TransportService;

+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Queue;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Function;
+
 import static java.util.Collections.emptyList;
 import static java.util.Collections.emptyMap;
 import static java.util.Collections.emptySet;
@@ -186,9 +186,9 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing {
     }

     @Override
-    public void close() throws IOException {
+    public void close() {
         ThreadPool.terminate(unicastConnectExecutor, 0, TimeUnit.SECONDS);
-        IOUtils.close(receivedResponses.values());
+        Releasables.close(receivedResponses.values());
         closed = true;
     }
@@ -272,7 +272,7 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing {
         }
     }

-    class SendPingsHandler implements Closeable {
+    class SendPingsHandler implements Releasable {
         private final int id;
         private final Set<DiscoveryNode> nodeToDisconnect = ConcurrentCollections.newConcurrentSet();
         private final PingCollection pingCollection;


@@ -47,6 +47,7 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.inject.internal.Nullable;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.lease.Releasables;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
@@ -240,6 +241,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
         joinThreadControl.stop();
         masterFD.stop("zen disco stop");
         nodesFD.stop();
+        Releasables.close(zenPing); // stop any ongoing pinging
         DiscoveryNodes nodes = nodes();
         if (sendLeaveRequest) {
             if (nodes.getMasterNode() == null) {
@@ -269,7 +271,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
     @Override
     protected void doClose() throws IOException {
-        IOUtils.close(masterFD, nodesFD, zenPing);
+        IOUtils.close(masterFD, nodesFD);
     }

     @Override
@Override @Override


@@ -19,7 +19,15 @@
 package org.elasticsearch.discovery.zen;

-import java.io.Closeable;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.lease.Releasable;
+import org.elasticsearch.common.unit.TimeValue;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -28,17 +36,9 @@ import java.util.List;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicLong;

-import org.elasticsearch.cluster.ClusterName;
-import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
-import org.elasticsearch.common.unit.TimeValue;
-
 import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK;

-public interface ZenPing extends Closeable {
+public interface ZenPing extends Releasable {

     void start(PingContextProvider contextProvider);
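
The switch from Closeable to Releasable across these three files is about the throws clause: Releasable is Elasticsearch's Closeable variant whose close() declares no checked IOException, so callers like ZenDiscovery.doStop() can release pingers inline without try/catch ceremony. Simplified shape of the interface (paraphrased from org.elasticsearch.common.lease.Releasable, not the verbatim source):

import java.io.Closeable;

interface Releasable extends Closeable {
    @Override
    void close(); // narrows the inherited throws clause: no checked IOException
}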


@@ -101,6 +101,11 @@ public final class IndexModule {
     public static final Setting<Boolean> INDEX_QUERY_CACHE_EVERYTHING_SETTING =
         Setting.boolSetting("index.queries.cache.everything", false, Property.IndexScope);

+    // This setting is an escape hatch in case not caching term queries would slow some users down
+    // Do not document.
+    public static final Setting<Boolean> INDEX_QUERY_CACHE_TERM_QUERIES_SETTING =
+        Setting.boolSetting("index.queries.cache.term_queries", false, Property.IndexScope);
+
     private final IndexSettings indexSettings;
     private final IndexStoreConfig indexStoreConfig;
     private final AnalysisRegistry analysisRegistry;


@@ -152,16 +152,15 @@ public class BooleanFieldMapper extends FieldMapper {
             } else {
                 sValue = value.toString();
             }
-            if (sValue.length() == 0) {
-                return Values.FALSE;
-            }
-            if (sValue.length() == 1 && sValue.charAt(0) == 'F') {
-                return Values.FALSE;
-            }
-            if (Booleans.parseBoolean(sValue, false)) {
-                return Values.TRUE;
-            }
-            return Values.FALSE;
+            switch (sValue) {
+                case "true":
+                    return Values.TRUE;
+                case "false":
+                    return Values.FALSE;
+                default:
+                    throw new IllegalArgumentException("Can't parse boolean value [" +
+                        sValue + "], expected [true] or [false]");
+            }
         }

         @Override
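
The strictness matters because the removed branches were lenient in a lossy way: the empty string and the single character 'F' mapped to false, and Booleans.parseBoolean mapped anything not recognizably false to true, so a typo silently indexed as true. A rough plain-Java approximation of the old behavior (illustrative only, not the exact Booleans semantics):

public class LenientBooleanDemo {
    // rough shape of the removed logic: only a few spellings meant false
    static boolean lenientParse(String s) {
        return !(s.isEmpty() || s.equals("F") || s.equals("false")
                 || s.equals("0") || s.equals("off") || s.equals("no"));
    }

    public static void main(String[] args) {
        System.out.println(lenientParse("flase")); // true -- a silent data bug
    }
}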


@@ -321,14 +321,6 @@ public class DateFieldMapper extends FieldMapper {
             dateParser = this.dateMathParser;
         }

-        if (PointValues.size(reader, name()) == 0) {
-            // no points, so nothing matches
-            return Relation.DISJOINT;
-        }
-
-        long minValue = LongPoint.decodeDimension(PointValues.getMinPackedValue(reader, name()), 0);
-        long maxValue = LongPoint.decodeDimension(PointValues.getMaxPackedValue(reader, name()), 0);
-
         long fromInclusive = Long.MIN_VALUE;
         if (from != null) {
             fromInclusive = parseToMilliseconds(from, !includeLower, timeZone, dateParser, context);
@@ -351,6 +343,17 @@ public class DateFieldMapper extends FieldMapper {
             }
         }

+        // This check needs to be done after fromInclusive and toInclusive
+        // are resolved so we can throw an exception if they are invalid
+        // even if there are no points in the shard
+        if (PointValues.size(reader, name()) == 0) {
+            // no points, so nothing matches
+            return Relation.DISJOINT;
+        }
+
+        long minValue = LongPoint.decodeDimension(PointValues.getMinPackedValue(reader, name()), 0);
+        long maxValue = LongPoint.decodeDimension(PointValues.getMaxPackedValue(reader, name()), 0);
+
         if (minValue >= fromInclusive && maxValue <= toInclusive) {
             return Relation.WITHIN;
         } else if (maxValue < fromInclusive || minValue > toInclusive) {


@@ -0,0 +1,56 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.shard;
+
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryCachingPolicy;
+import org.apache.lucene.search.TermQuery;
+
+import java.io.IOException;
+
+/**
+ * A {@link QueryCachingPolicy} that does not cache {@link TermQuery}s.
+ */
+final class ElasticsearchQueryCachingPolicy implements QueryCachingPolicy {
+
+    private final QueryCachingPolicy in;
+
+    ElasticsearchQueryCachingPolicy(QueryCachingPolicy in) {
+        this.in = in;
+    }
+
+    @Override
+    public void onUse(Query query) {
+        if (query.getClass() != TermQuery.class) {
+            // Do not waste space in the history for term queries. The assumption
+            // is that these queries are very fast so not worth caching
+            in.onUse(query);
+        }
+    }
+
+    @Override
+    public boolean shouldCache(Query query) throws IOException {
+        if (query.getClass() == TermQuery.class) {
+            return false;
+        }
+        return in.shouldCache(query);
+    }
+
+}
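
A quick usage sketch of the new policy. The demo class is hypothetical, would need to live in org.elasticsearch.index.shard since the policy is package-private, and assumes the single-argument shouldCache signature of the Lucene version this diff builds against:

import org.apache.lucene.index.Term;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.QueryCachingPolicy;
import org.apache.lucene.search.TermQuery;

public class PolicyDemo {
    public static void main(String[] args) throws Exception {
        // wrap ALWAYS_CACHE as the delegate to make the short-circuit visible
        QueryCachingPolicy policy = new ElasticsearchQueryCachingPolicy(QueryCachingPolicy.ALWAYS_CACHE);
        System.out.println(policy.shouldCache(new TermQuery(new Term("field", "value")))); // false
        System.out.println(policy.shouldCache(new MatchAllDocsQuery()));                   // true
    }
}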


@@ -270,7 +270,11 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
         if (IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.get(settings)) {
             cachingPolicy = QueryCachingPolicy.ALWAYS_CACHE;
         } else {
-            cachingPolicy = new UsageTrackingQueryCachingPolicy();
+            QueryCachingPolicy cachingPolicy = new UsageTrackingQueryCachingPolicy();
+            if (IndexModule.INDEX_QUERY_CACHE_TERM_QUERIES_SETTING.get(settings) == false) {
+                cachingPolicy = new ElasticsearchQueryCachingPolicy(cachingPolicy);
+            }
+            this.cachingPolicy = cachingPolicy;
         }
         indexShardOperationsLock = new IndexShardOperationsLock(shardId, logger, threadPool);
         searcherWrapper = indexSearcherWrapper;


@@ -75,9 +75,6 @@ import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.discovery.Discovery;
 import org.elasticsearch.discovery.DiscoveryModule;
 import org.elasticsearch.discovery.DiscoverySettings;
-import org.elasticsearch.discovery.zen.UnicastHostsProvider;
-import org.elasticsearch.discovery.zen.UnicastZenPing;
-import org.elasticsearch.discovery.zen.ZenPing;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.gateway.GatewayAllocator;
@@ -655,11 +652,13 @@ public class Node implements Closeable {
         injector.getInstance(SnapshotShardsService.class).stop();
         // stop any changes happening as a result of cluster state changes
         injector.getInstance(IndicesClusterStateService.class).stop();
+        // close discovery early to not react to pings anymore.
+        // This can confuse other nodes and delay things - mostly if we're the master and we're running tests.
+        injector.getInstance(Discovery.class).stop();
         // we close indices first, so operations won't be allowed on it
         injector.getInstance(IndicesTTLService.class).stop();
         injector.getInstance(RoutingService.class).stop();
         injector.getInstance(ClusterService.class).stop();
-        injector.getInstance(Discovery.class).stop();
         injector.getInstance(NodeConnectionsService.class).stop();
         injector.getInstance(MonitorService.class).stop();
         injector.getInstance(GatewayService.class).stop();


@@ -71,49 +71,58 @@ public abstract class BaseRestHandler extends AbstractComponent implements RestH
             request.unconsumedParams().stream().filter(p -> !responseParams().contains(p)).collect(Collectors.toCollection(TreeSet::new));

         // validate the non-response params
-        if (unconsumedParams.isEmpty() == false) {
-            String message = String.format(
-                Locale.ROOT,
-                "request [%s] contains unrecognized parameter%s: ",
-                request.path(),
-                unconsumedParams.size() > 1 ? "s" : "");
-            boolean first = true;
-            for (final String unconsumedParam : unconsumedParams) {
-                final LevensteinDistance ld = new LevensteinDistance();
-                final List<Tuple<Float, String>> scoredParams = new ArrayList<>();
-                final Set<String> candidateParams = new HashSet<>();
-                candidateParams.addAll(request.consumedParams());
-                candidateParams.addAll(responseParams());
-                for (final String candidateParam : candidateParams) {
-                    final float distance = ld.getDistance(unconsumedParam, candidateParam);
-                    if (distance > 0.5f) {
-                        scoredParams.add(new Tuple<>(distance, candidateParam));
-                    }
-                }
-                CollectionUtil.timSort(scoredParams, (a, b) -> {
-                    // sort by distance in reverse order, then parameter name for equal distances
-                    int compare = a.v1().compareTo(b.v1());
-                    if (compare != 0) return -compare;
-                    else return a.v2().compareTo(b.v2());
-                });
-                if (first == false) {
-                    message += ", ";
-                }
-                message += "[" + unconsumedParam + "]";
-                final List<String> keys = scoredParams.stream().map(Tuple::v2).collect(Collectors.toList());
-                if (keys.isEmpty() == false) {
-                    message += " -> did you mean " + (keys.size() == 1 ? "[" + keys.get(0) + "]": "any of " + keys.toString()) + "?";
-                }
-                first = false;
-            }
-            throw new IllegalArgumentException(message);
+        if (!unconsumedParams.isEmpty()) {
+            final Set<String> candidateParams = new HashSet<>();
+            candidateParams.addAll(request.consumedParams());
+            candidateParams.addAll(responseParams());
+            throw new IllegalArgumentException(unrecognized(request, unconsumedParams, candidateParams, "parameter"));
         }

         // execute the action
         action.accept(channel);
     }

+    protected final String unrecognized(
+        final RestRequest request,
+        final Set<String> invalids,
+        final Set<String> candidates,
+        final String detail) {
+        String message = String.format(
+            Locale.ROOT,
+            "request [%s] contains unrecognized %s%s: ",
+            request.path(),
+            detail,
+            invalids.size() > 1 ? "s" : "");
+        boolean first = true;
+        for (final String invalid : invalids) {
+            final LevensteinDistance ld = new LevensteinDistance();
+            final List<Tuple<Float, String>> scoredParams = new ArrayList<>();
+            for (final String candidate : candidates) {
+                final float distance = ld.getDistance(invalid, candidate);
+                if (distance > 0.5f) {
+                    scoredParams.add(new Tuple<>(distance, candidate));
+                }
+            }
+            CollectionUtil.timSort(scoredParams, (a, b) -> {
+                // sort by distance in reverse order, then parameter name for equal distances
+                int compare = a.v1().compareTo(b.v1());
+                if (compare != 0) return -compare;
+                else return a.v2().compareTo(b.v2());
+            });
+            if (first == false) {
+                message += ", ";
+            }
+            message += "[" + invalid + "]";
+            final List<String> keys = scoredParams.stream().map(Tuple::v2).collect(Collectors.toList());
+            if (keys.isEmpty() == false) {
+                message += " -> did you mean " + (keys.size() == 1 ? "[" + keys.get(0) + "]" : "any of " + keys.toString()) + "?";
+            }
+            first = false;
+        }
+        return message;
+    }
+
     /**
      * REST requests are handled by preparing a channel consumer that represents the execution of
      * the request against a channel.
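
One subtlety in the suggestion logic worth calling out: Lucene's LevensteinDistance.getDistance returns a similarity score in [0, 1], where 1.0 means identical, not an edit count, so the > 0.5f filter keeps only candidates that are more alike than not. A quick check of the threshold:

import org.apache.lucene.search.spell.LevensteinDistance;

public class SuggestDemo {
    public static void main(String[] args) {
        LevensteinDistance ld = new LevensteinDistance();
        System.out.println(ld.getDistance("local", "level"));        // low similarity, filtered out
        System.out.println(ld.getDistance("fieldata", "fielddata")); // high similarity, suggested
    }
}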


@@ -55,7 +55,7 @@ public class RestNodesInfoAction extends BaseRestHandler {
     public RestNodesInfoAction(Settings settings, RestController controller, SettingsFilter settingsFilter) {
         super(settings);
         controller.registerHandler(GET, "/_nodes", this);
-        // this endpoint is used for metrics, not for nodeIds, like /_nodes/fs
+        // this endpoint is used for metrics, not for node IDs, like /_nodes/fs
         controller.registerHandler(GET, "/_nodes/{nodeId}", this);
         controller.registerHandler(GET, "/_nodes/{nodeId}/{metrics}", this);
         // added this endpoint to be aligned with stats


@@ -33,7 +33,13 @@ import org.elasticsearch.rest.action.RestActions.NodesResponseRestListener;

 import java.io.IOException;
 import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Locale;
+import java.util.Map;
 import java.util.Set;
+import java.util.TreeSet;
+import java.util.function.Consumer;

 import static org.elasticsearch.rest.RestRequest.Method.GET;
@@ -48,9 +54,38 @@ public class RestNodesStatsAction extends BaseRestHandler {
         controller.registerHandler(GET, "/_nodes/stats/{metric}", this);
         controller.registerHandler(GET, "/_nodes/{nodeId}/stats/{metric}", this);
-        controller.registerHandler(GET, "/_nodes/stats/{metric}/{indexMetric}", this);
-        controller.registerHandler(GET, "/_nodes/{nodeId}/stats/{metric}/{indexMetric}", this);
+        controller.registerHandler(GET, "/_nodes/stats/{metric}/{index_metric}", this);
+        controller.registerHandler(GET, "/_nodes/{nodeId}/stats/{metric}/{index_metric}", this);
     }

+    static final Map<String, Consumer<NodesStatsRequest>> METRICS;
+
+    static {
+        final Map<String, Consumer<NodesStatsRequest>> metrics = new HashMap<>();
+        metrics.put("os", r -> r.os(true));
+        metrics.put("jvm", r -> r.jvm(true));
+        metrics.put("thread_pool", r -> r.threadPool(true));
+        metrics.put("fs", r -> r.fs(true));
+        metrics.put("transport", r -> r.transport(true));
+        metrics.put("http", r -> r.http(true));
+        metrics.put("indices", r -> r.indices(true));
+        metrics.put("process", r -> r.process(true));
+        metrics.put("breaker", r -> r.breaker(true));
+        metrics.put("script", r -> r.script(true));
+        metrics.put("discovery", r -> r.discovery(true));
+        metrics.put("ingest", r -> r.ingest(true));
+        METRICS = Collections.unmodifiableMap(metrics);
+    }
+
+    static final Map<String, Consumer<CommonStatsFlags>> FLAGS;
+
+    static {
+        final Map<String, Consumer<CommonStatsFlags>> flags = new HashMap<>();
+        for (final Flag flag : CommonStatsFlags.Flag.values()) {
+            flags.put(flag.getRestName(), f -> f.set(flag, true));
+        }
+        FLAGS = Collections.unmodifiableMap(flags);
+    }
+
     @Override
@@ -62,35 +97,72 @@ public class RestNodesStatsAction extends BaseRestHandler {
         nodesStatsRequest.timeout(request.param("timeout"));

         if (metrics.size() == 1 && metrics.contains("_all")) {
+            if (request.hasParam("index_metric")) {
+                throw new IllegalArgumentException(
+                    String.format(
+                        Locale.ROOT,
+                        "request [%s] contains index metrics [%s] but all stats requested",
+                        request.path(),
+                        request.param("index_metric")));
+            }
             nodesStatsRequest.all();
             nodesStatsRequest.indices(CommonStatsFlags.ALL);
+        } else if (metrics.contains("_all")) {
+            throw new IllegalArgumentException(
+                String.format(Locale.ROOT,
+                    "request [%s] contains _all and individual metrics [%s]",
+                    request.path(),
+                    request.param("metric")));
         } else {
             nodesStatsRequest.clear();
-            nodesStatsRequest.os(metrics.contains("os"));
-            nodesStatsRequest.jvm(metrics.contains("jvm"));
-            nodesStatsRequest.threadPool(metrics.contains("thread_pool"));
-            nodesStatsRequest.fs(metrics.contains("fs"));
-            nodesStatsRequest.transport(metrics.contains("transport"));
-            nodesStatsRequest.http(metrics.contains("http"));
-            nodesStatsRequest.indices(metrics.contains("indices"));
-            nodesStatsRequest.process(metrics.contains("process"));
-            nodesStatsRequest.breaker(metrics.contains("breaker"));
-            nodesStatsRequest.script(metrics.contains("script"));
-            nodesStatsRequest.discovery(metrics.contains("discovery"));
-            nodesStatsRequest.ingest(metrics.contains("ingest"));
+            // use a sorted set so the unrecognized parameters appear in a reliable sorted order
+            final Set<String> invalidMetrics = new TreeSet<>();
+            for (final String metric : metrics) {
+                final Consumer<NodesStatsRequest> handler = METRICS.get(metric);
+                if (handler != null) {
+                    handler.accept(nodesStatsRequest);
+                } else {
+                    invalidMetrics.add(metric);
+                }
+            }
+            if (!invalidMetrics.isEmpty()) {
+                throw new IllegalArgumentException(unrecognized(request, invalidMetrics, METRICS.keySet(), "metric"));
+            }

             // check for index specific metrics
             if (metrics.contains("indices")) {
-                Set<String> indexMetrics = Strings.splitStringByCommaToSet(request.param("indexMetric", "_all"));
+                Set<String> indexMetrics = Strings.splitStringByCommaToSet(request.param("index_metric", "_all"));
                 if (indexMetrics.size() == 1 && indexMetrics.contains("_all")) {
                     nodesStatsRequest.indices(CommonStatsFlags.ALL);
                 } else {
                     CommonStatsFlags flags = new CommonStatsFlags();
-                    for (Flag flag : CommonStatsFlags.Flag.values()) {
-                        flags.set(flag, indexMetrics.contains(flag.getRestName()));
-                    }
+                    flags.clear();
+                    // use a sorted set so the unrecognized parameters appear in a reliable sorted order
+                    final Set<String> invalidIndexMetrics = new TreeSet<>();
+                    for (final String indexMetric : indexMetrics) {
+                        final Consumer<CommonStatsFlags> handler = FLAGS.get(indexMetric);
+                        if (handler != null) {
+                            handler.accept(flags);
+                        } else {
+                            invalidIndexMetrics.add(indexMetric);
+                        }
+                    }
+                    if (!invalidIndexMetrics.isEmpty()) {
+                        throw new IllegalArgumentException(unrecognized(request, invalidIndexMetrics, FLAGS.keySet(), "index metric"));
+                    }
                     nodesStatsRequest.indices(flags);
                 }
+            } else if (request.hasParam("index_metric")) {
+                throw new IllegalArgumentException(
+                    String.format(
+                        Locale.ROOT,
+                        "request [%s] contains index metrics [%s] but indices stats not requested",
+                        request.path(),
+                        request.param("index_metric")));
             }
         }
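
The pattern here, a map from metric name to a Consumer that flips the matching request flag, replaces a fixed row of metrics.contains(...) calls and is what makes the unknown-metric validation possible at all. Stripped-down shape of the idea, with a hypothetical Request type standing in for NodesStatsRequest:

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import java.util.function.Consumer;

public class MetricDispatchDemo {
    static class Request { boolean os, jvm; }

    public static void main(String[] args) {
        Map<String, Consumer<Request>> metrics = new HashMap<>();
        metrics.put("os", r -> r.os = true);
        metrics.put("jvm", r -> r.jvm = true);

        Request request = new Request();
        Set<String> invalid = new TreeSet<>(); // sorted for stable error messages
        for (String m : Arrays.asList("os", "jvn")) { // "jvn" is a typo
            Consumer<Request> handler = metrics.get(m);
            if (handler != null) handler.accept(request); else invalid.add(m);
        }
        System.out.println(invalid); // [jvn] -> the caller raises "did you mean [jvm]?"
    }
}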


@@ -36,7 +36,13 @@ import org.elasticsearch.rest.action.RestBuilderListener;

 import java.io.IOException;
 import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Locale;
+import java.util.Map;
 import java.util.Set;
+import java.util.TreeSet;
+import java.util.function.Consumer;

 import static org.elasticsearch.rest.RestRequest.Method.GET;
 import static org.elasticsearch.rest.RestStatus.OK;
@@ -49,11 +55,34 @@ public class RestIndicesStatsAction extends BaseRestHandler {
         super(settings);
         controller.registerHandler(GET, "/_stats", this);
         controller.registerHandler(GET, "/_stats/{metric}", this);
-        controller.registerHandler(GET, "/_stats/{metric}/{indexMetric}", this);
         controller.registerHandler(GET, "/{index}/_stats", this);
         controller.registerHandler(GET, "/{index}/_stats/{metric}", this);
     }

+    static Map<String, Consumer<IndicesStatsRequest>> METRICS;
+
+    static {
+        final Map<String, Consumer<IndicesStatsRequest>> metrics = new HashMap<>();
+        metrics.put("docs", r -> r.docs(true));
+        metrics.put("store", r -> r.store(true));
+        metrics.put("indexing", r -> r.indexing(true));
+        metrics.put("search", r -> r.search(true));
+        metrics.put("suggest", r -> r.search(true));
+        metrics.put("get", r -> r.get(true));
+        metrics.put("merge", r -> r.merge(true));
+        metrics.put("refresh", r -> r.refresh(true));
+        metrics.put("flush", r -> r.flush(true));
+        metrics.put("warmer", r -> r.warmer(true));
+        metrics.put("query_cache", r -> r.queryCache(true));
+        metrics.put("segments", r -> r.segments(true));
+        metrics.put("fielddata", r -> r.fieldData(true));
+        metrics.put("completion", r -> r.completion(true));
+        metrics.put("request_cache", r -> r.requestCache(true));
+        metrics.put("recovery", r -> r.recovery(true));
+        metrics.put("translog", r -> r.translog(true));
+        METRICS = Collections.unmodifiableMap(metrics);
+    }
+
     @Override
     public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest();
@@ -65,24 +94,28 @@ public class RestIndicesStatsAction extends BaseRestHandler {
         // short cut, if no metrics have been specified in URI
         if (metrics.size() == 1 && metrics.contains("_all")) {
             indicesStatsRequest.all();
+        } else if (metrics.contains("_all")) {
+            throw new IllegalArgumentException(
+                String.format(Locale.ROOT,
+                    "request [%s] contains _all and individual metrics [%s]",
+                    request.path(),
+                    request.param("metric")));
         } else {
             indicesStatsRequest.clear();
-            indicesStatsRequest.docs(metrics.contains("docs"));
-            indicesStatsRequest.store(metrics.contains("store"));
-            indicesStatsRequest.indexing(metrics.contains("indexing"));
-            indicesStatsRequest.search(metrics.contains("search") || metrics.contains("suggest"));
-            indicesStatsRequest.get(metrics.contains("get"));
-            indicesStatsRequest.merge(metrics.contains("merge"));
-            indicesStatsRequest.refresh(metrics.contains("refresh"));
-            indicesStatsRequest.flush(metrics.contains("flush"));
-            indicesStatsRequest.warmer(metrics.contains("warmer"));
-            indicesStatsRequest.queryCache(metrics.contains("query_cache"));
-            indicesStatsRequest.segments(metrics.contains("segments"));
-            indicesStatsRequest.fieldData(metrics.contains("fielddata"));
-            indicesStatsRequest.completion(metrics.contains("completion"));
-            indicesStatsRequest.requestCache(metrics.contains("request_cache"));
-            indicesStatsRequest.recovery(metrics.contains("recovery"));
-            indicesStatsRequest.translog(metrics.contains("translog"));
+            // use a sorted set so the unrecognized parameters appear in a reliable sorted order
+            final Set<String> invalidMetrics = new TreeSet<>();
+            for (final String metric : metrics) {
+                final Consumer<IndicesStatsRequest> consumer = METRICS.get(metric);
+                if (consumer != null) {
+                    consumer.accept(indicesStatsRequest);
+                } else {
+                    invalidMetrics.add(metric);
+                }
+            }
+            if (!invalidMetrics.isEmpty()) {
+                throw new IllegalArgumentException(unrecognized(request, invalidMetrics, METRICS.keySet(), "metric"));
+            }
         }

         if (request.hasParam("groups")) {
if (request.hasParam("groups")) { if (request.hasParam("groups")) {


@@ -19,15 +19,12 @@
 package org.elasticsearch;

-import org.elasticsearch.action.ShardValidateQueryRequestTests;
-import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
-import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.query.QueryStringQueryBuilder;
-import org.elasticsearch.monitor.os.OsStats;
 import org.elasticsearch.index.query.SimpleQueryStringBuilder;
+import org.elasticsearch.monitor.os.OsStats;
 import org.elasticsearch.script.Script;
 import org.elasticsearch.search.internal.AliasFilter;
 import org.elasticsearch.test.ESTestCase;
@@ -136,8 +133,11 @@ public class VersionTests extends ESTestCase {
         assertThat(Version.V_5_0_0_alpha1.minimumCompatibilityVersion(), equalTo(Version.V_5_0_0_alpha1));
         // from 6.0 on we are supporting the latest minor of the previous major... this might fail once we add a new version ie. 5.x is
         // released since we need to bump the supported minor in Version#minimumCompatibilityVersion()
-        assertThat("did you miss to bump the minor in Version#minimumCompatibilityVersion()",
-            Version.V_6_0_0_alpha1.minimumCompatibilityVersion(), equalTo(VersionUtils.getPreviousVersion(Version.V_6_0_0_alpha1)));
+        Version lastVersion = VersionUtils.getPreviousVersion(Version.V_6_0_0_alpha1);
+        assertEquals(lastVersion.major, Version.V_6_0_0_alpha1.minimumCompatibilityVersion().major);
+        assertEquals("did you miss to bump the minor in Version#minimumCompatibilityVersion()",
+            lastVersion.minor, Version.V_6_0_0_alpha1.minimumCompatibilityVersion().minor);
+        assertEquals(0, Version.V_6_0_0_alpha1.minimumCompatibilityVersion().revision);
     }

     public void testToString() {


@@ -31,7 +31,8 @@ import java.io.IOException;

 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows;

-@ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0, transportClientRatio = 0.0)
+@ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0, transportClientRatio = 0.0,
+    autoMinMasterNodes = false)
 public class IndicesExistsIT extends ESIntegTestCase {

     public void testIndexExistsWithBlocksInPlace() throws IOException {


@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.stats;
+
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.Collections;
+
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.object.HasToString.hasToString;
+
+public class IndicesStatsResponseTests extends ESTestCase {
+
+    public void testInvalidLevel() {
+        final IndicesStatsResponse response = new IndicesStatsResponse();
+        final String level = randomAsciiOfLength(16);
+        final ToXContent.Params params = new ToXContent.MapParams(Collections.singletonMap("level", level));
+        final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> response.toXContent(null, params));
+        assertThat(
+            e,
+            hasToString(containsString("level parameter must be one of [cluster] or [indices] or [shards] but was [" + level + "]")));
+    }
+
+}


@@ -43,7 +43,7 @@ import java.util.concurrent.CyclicBarrier;

 import static org.hamcrest.Matchers.equalTo;

-@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0)
+@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoMinMasterNodes = false)
 public class IndexingMasterFailoverIT extends ESIntegTestCase {

     @Override


@@ -63,7 +63,7 @@ import static org.hamcrest.Matchers.isOneOf;
 import static org.hamcrest.Matchers.not;
 import static org.hamcrest.Matchers.nullValue;

-@ClusterScope(scope = Scope.TEST, numDataNodes = 0)
+@ClusterScope(scope = Scope.TEST, numDataNodes = 0, autoMinMasterNodes = false)
 @TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE,org.elasticsearch.discovery.zen:TRACE")
 public class MinimumMasterNodesIT extends ESIntegTestCase {
@@ -275,12 +275,14 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
             .put("discovery.initial_state_timeout", "500ms")
             .build();

-        logger.info("--> start 2 nodes");
-        internalCluster().startNodesAsync(2, settings).get();
+        logger.info("--> start first node and wait for it to be a master");
+        internalCluster().startNode(settings);
+        ensureClusterSizeConsistency();

         // wait until second node join the cluster
-        ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").get();
-        assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+        logger.info("--> start second node and wait for it to join");
+        internalCluster().startNode(settings);
+        ensureClusterSizeConsistency();

         logger.info("--> setting minimum master node to 2");
         setMinimumMasterNodes(2);
@@ -298,8 +300,7 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
         logger.info("--> bringing another node up");
         internalCluster().startNode(Settings.builder().put(settings).put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2).build());
-        clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").get();
-        assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+        ensureClusterSizeConsistency();
     }

     private void assertNoMasterBlockOnAllNodes() throws InterruptedException {


@@ -49,7 +49,7 @@ import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.lessThan;

-@ClusterScope(scope = Scope.TEST, numDataNodes = 0)
+@ClusterScope(scope = Scope.TEST, numDataNodes = 0, autoMinMasterNodes = false)
 public class NoMasterNodeIT extends ESIntegTestCase {

     @Override


@@ -69,7 +69,10 @@ public class AckClusterUpdateSettingsIT extends ESIntegTestCase {
     private void removePublishTimeout() {
         //to test that the acknowledgement mechanism is working we better disable the wait for publish
         //otherwise the operation is most likely acknowledged even if it doesn't support ack
-        assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "0")));
+        assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
+            .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "0")
+            .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "30s")
+        ));
     }

     public void testClusterUpdateSettingsAcknowledgement() {


@@ -36,7 +36,6 @@ import org.elasticsearch.cluster.routing.RoutingNode;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.ShardRoutingState;
 import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
-import org.elasticsearch.common.collect.Iterators;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.discovery.DiscoverySettings;
 import org.elasticsearch.index.Index;
@@ -61,7 +60,9 @@ public class AckIT extends ESIntegTestCase {
         //to test that the acknowledgement mechanism is working we better disable the wait for publish
         //otherwise the operation is most likely acknowledged even if it doesn't support ack
         return Settings.builder().put(super.nodeSettings(nodeOrdinal))
-            .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), 0).build();
+            .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "30s") // explicitly set so it won't default to publish timeout
+            .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "0s") // don't wait post commit to check acking
+            .build();
     }

     public void testUpdateSettingsAcknowledgement() {


@@ -30,7 +30,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationD
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.discovery.zen.ElectMasterService;
 import org.elasticsearch.discovery.zen.ZenDiscovery;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
@@ -104,7 +103,6 @@ public class AwarenessAllocationIT extends ESIntegTestCase {
         Settings commonSettings = Settings.builder()
             .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "zone.values", "a,b")
             .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), "zone")
-            .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 3)
             .put(ZenDiscovery.JOIN_TIMEOUT_SETTING.getKey(), "10s")
             .build();

View File

@ -20,32 +20,20 @@ package org.elasticsearch.cluster.service;
import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier; import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse; import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse;
import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.LocalNodeMasterListener;
import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.Singleton;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.ESIntegTestCase.Scope;
import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.threadpool.ThreadPool;
import java.util.ArrayList;
import java.util.Arrays; import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet; import java.util.HashSet;
import java.util.List; import java.util.List;
import java.util.Set; import java.util.Set;
@ -56,17 +44,10 @@ import java.util.concurrent.atomic.AtomicBoolean;
import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
@ClusterScope(scope = Scope.TEST, numDataNodes = 0) @ClusterScope(scope = Scope.TEST, numDataNodes = 0)
public class ClusterServiceIT extends ESIntegTestCase { public class ClusterServiceIT extends ESIntegTestCase {
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return Arrays.asList(TestPlugin.class);
}
public void testAckedUpdateTask() throws Exception { public void testAckedUpdateTask() throws Exception {
internalCluster().startNode(); internalCluster().startNode();
ClusterService clusterService = internalCluster().getInstance(ClusterService.class); ClusterService clusterService = internalCluster().getInstance(ClusterService.class);
@ -482,141 +463,4 @@ public class ClusterServiceIT extends ESIntegTestCase {
assertTrue(controlSources.isEmpty()); assertTrue(controlSources.isEmpty());
block2.countDown(); block2.countDown();
} }
public void testLocalNodeMasterListenerCallbacks() throws Exception {
Settings settings = Settings.builder()
.put("discovery.zen.minimum_master_nodes", 1)
.put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "400ms")
.put("discovery.initial_state_timeout", "500ms")
.build();
String node_0 = internalCluster().startNode(settings);
ClusterService clusterService = internalCluster().getInstance(ClusterService.class);
MasterAwareService testService = internalCluster().getInstance(MasterAwareService.class);
ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID)
.setWaitForNodes("1").get();
assertThat(clusterHealth.isTimedOut(), equalTo(false));
// the first node should be a master as the minimum required is 1
assertThat(clusterService.state().nodes().getMasterNode(), notNullValue());
assertThat(clusterService.state().nodes().isLocalNodeElectedMaster(), is(true));
assertThat(testService.master(), is(true));
String node_1 = internalCluster().startNode(settings);
final ClusterService clusterService1 = internalCluster().getInstance(ClusterService.class, node_1);
MasterAwareService testService1 = internalCluster().getInstance(MasterAwareService.class, node_1);
clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").get();
assertThat(clusterHealth.isTimedOut(), equalTo(false));
// the second node should not be the master as node1 is already the master.
assertThat(clusterService1.state().nodes().isLocalNodeElectedMaster(), is(false));
assertThat(testService1.master(), is(false));
internalCluster().stopCurrentMasterNode();
clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("1").get();
assertThat(clusterHealth.isTimedOut(), equalTo(false));
// now that node0 is closed, node1 should be elected as master
assertThat(clusterService1.state().nodes().isLocalNodeElectedMaster(), is(true));
assertThat(testService1.master(), is(true));
// start another node and set min_master_nodes
internalCluster().startNode(Settings.builder().put(settings));
assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut());
Settings transientSettings = Settings.builder()
.put("discovery.zen.minimum_master_nodes", 2)
.build();
client().admin().cluster().prepareUpdateSettings().setTransientSettings(transientSettings).get();
// and shutdown the second node
internalCluster().stopRandomNonMasterNode();
// there should not be any master as the minimum number of required eligible masters is not met
awaitBusy(() -> clusterService1.state().nodes().getMasterNode() == null &&
clusterService1.clusterServiceState().getClusterStateStatus() == ClusterStateStatus.APPLIED);
assertThat(testService1.master(), is(false));
// bring the node back up
String node_2 = internalCluster().startNode(Settings.builder().put(settings).put(transientSettings));
ClusterService clusterService2 = internalCluster().getInstance(ClusterService.class, node_2);
MasterAwareService testService2 = internalCluster().getInstance(MasterAwareService.class, node_2);
// make sure both nodes see each other, otherwise the master node below could be null if node 2 is master and node 1 didn't receive
// the updated cluster state...
assertThat(internalCluster().client(node_1).admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setLocal(true)
.setWaitForNodes("2").get().isTimedOut(), is(false));
assertThat(internalCluster().client(node_2).admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setLocal(true)
.setWaitForNodes("2").get().isTimedOut(), is(false));
// now that we started another node, a new master should be elected
assertThat(clusterService2.state().nodes().getMasterNode(), is(notNullValue()));
if (node_2.equals(clusterService2.state().nodes().getMasterNode().getName())) {
assertThat(testService1.master(), is(false));
assertThat(testService2.master(), is(true));
} else {
assertThat(testService1.master(), is(true));
assertThat(testService2.master(), is(false));
}
}
public static class TestPlugin extends Plugin {
@Override
public Collection<Class<? extends LifecycleComponent>> getGuiceServiceClasses() {
List<Class<? extends LifecycleComponent>> services = new ArrayList<>(1);
services.add(MasterAwareService.class);
return services;
}
}
@Singleton
public static class MasterAwareService extends AbstractLifecycleComponent implements LocalNodeMasterListener {
private final ClusterService clusterService;
private volatile boolean master;
@Inject
public MasterAwareService(Settings settings, ClusterService clusterService) {
super(settings);
clusterService.add(this);
this.clusterService = clusterService;
logger.info("initialized test service");
}
@Override
public void onMaster() {
logger.info("on master [{}]", clusterService.localNode());
master = true;
}
@Override
public void offMaster() {
logger.info("off master [{}]", clusterService.localNode());
master = false;
}
public boolean master() {
return master;
}
@Override
protected void doStart() {
}
@Override
protected void doStop() {
}
@Override
protected void doClose() {
}
@Override
public String executorName() {
return ThreadPool.Names.SAME;
}
}
} }

View File

@ -30,6 +30,7 @@ import org.elasticsearch.cluster.ClusterStateTaskConfig;
import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterStateTaskExecutor;
import org.elasticsearch.cluster.ClusterStateTaskListener; import org.elasticsearch.cluster.ClusterStateTaskListener;
import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.LocalNodeMasterListener;
import org.elasticsearch.cluster.NodeConnectionsService; import org.elasticsearch.cluster.NodeConnectionsService;
import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode;
@ -44,6 +45,7 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.BaseFuture; import org.elasticsearch.common.util.concurrent.BaseFuture;
import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.MockLogAppender;
import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.junit.annotations.TestLogging;
@ -146,6 +148,51 @@ public class ClusterServiceTests extends ESTestCase {
return timedClusterService; return timedClusterService;
} }
public void testTimedOutUpdateTaskCleanedUp() throws Exception {
final CountDownLatch block = new CountDownLatch(1);
clusterService.submitStateUpdateTask("block-task", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
try {
block.await();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
return currentState;
}
@Override
public void onFailure(String source, Exception e) {
throw new RuntimeException(e);
}
});
final CountDownLatch block2 = new CountDownLatch(1);
clusterService.submitStateUpdateTask("test", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
block2.countDown();
return currentState;
}
@Override
public TimeValue timeout() {
return TimeValue.ZERO;
}
@Override
public void onFailure(String source, Exception e) {
block2.countDown();
}
});
block.countDown();
block2.await();
synchronized (clusterService.updateTasksPerExecutor) {
assertTrue("expected empty map but was " + clusterService.updateTasksPerExecutor,
clusterService.updateTasksPerExecutor.isEmpty());
}
}
public void testTimeoutUpdateTask() throws Exception { public void testTimeoutUpdateTask() throws Exception {
final CountDownLatch block = new CountDownLatch(1); final CountDownLatch block = new CountDownLatch(1);
clusterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() { clusterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() {
@ -1098,6 +1145,48 @@ public class ClusterServiceTests extends ESTestCase {
timedClusterService.close(); timedClusterService.close();
} }
public void testLocalNodeMasterListenerCallbacks() throws Exception {
TimedClusterService timedClusterService = createTimedClusterService(false);
AtomicBoolean isMaster = new AtomicBoolean();
timedClusterService.add(new LocalNodeMasterListener() {
@Override
public void onMaster() {
isMaster.set(true);
}
@Override
public void offMaster() {
isMaster.set(false);
}
@Override
public String executorName() {
return ThreadPool.Names.SAME;
}
});
ClusterState state = timedClusterService.state();
DiscoveryNodes nodes = state.nodes();
DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(nodes).masterNodeId(nodes.getLocalNodeId());
state = ClusterState.builder(state).blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).nodes(nodesBuilder).build();
setState(timedClusterService, state);
assertThat(isMaster.get(), is(true));
nodes = state.nodes();
nodesBuilder = DiscoveryNodes.builder(nodes).masterNodeId(null);
state = ClusterState.builder(state).blocks(ClusterBlocks.builder().addGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_WRITES))
.nodes(nodesBuilder).build();
setState(timedClusterService, state);
assertThat(isMaster.get(), is(false));
nodesBuilder = DiscoveryNodes.builder(nodes).masterNodeId(nodes.getLocalNodeId());
state = ClusterState.builder(state).blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).nodes(nodesBuilder).build();
setState(timedClusterService, state);
assertThat(isMaster.get(), is(true));
timedClusterService.close();
}
private static class SimpleTask { private static class SimpleTask {
private final int id; private final int id;

View File

@ -514,6 +514,44 @@ public class TimeZoneRoundingTests extends ESTestCase {
} }
} }
/**
* Tests for DST transitions with overlaps and day roundings.
*/
public void testDST_END_Edgecases() {
// First case: DST ends at 1am local time, turning clocks back one hour.
// We want the overlapping hour to count for the next day, making it a 25h interval.
DateTimeUnit timeUnit = DateTimeUnit.DAY_OF_MONTH;
DateTimeZone tz = DateTimeZone.forID("Atlantic/Azores");
Rounding rounding = new Rounding.TimeUnitRounding(timeUnit, tz);
// Sunday, 29 October 2000, 01:00:00 clocks were turned backward 1 hour
// to Sunday, 29 October 2000, 00:00:00 local standard time instead
long midnightBeforeTransition = time("2000-10-29T00:00:00", tz);
long nextMidnight = time("2000-10-30T00:00:00", tz);
assertInterval(midnightBeforeTransition, nextMidnight, rounding, 25 * 60, tz);
// Second case: DST ends at midnight local time, turning clocks back one hour to 11pm local time.
// We want the overlapping hour to count for the previous day here.
tz = DateTimeZone.forID("America/Lima");
rounding = new Rounding.TimeUnitRounding(timeUnit, tz);
// Sunday, 1 April 1990, 00:00:00 clocks were turned backward 1 hour to
// Saturday, 31 March 1990, 23:00:00 local standard time instead
midnightBeforeTransition = time("1990-03-31T00:00:00.000-04:00");
nextMidnight = time("1990-04-01T00:00:00.000-05:00");
assertInterval(midnightBeforeTransition, nextMidnight, rounding, 25 * 60, tz);
// make sure the next interval is 24h long again
long midnightAfterTransition = time("1990-04-01T00:00:00.000-05:00");
nextMidnight = time("1990-04-02T00:00:00.000-05:00");
assertInterval(midnightAfterTransition, nextMidnight, rounding, 24 * 60, tz);
}
/** /**
* Test that time zones are correctly parsed. There is a bug with * Test that time zones are correctly parsed. There is a bug with
* Joda 2.9.4 (see https://github.com/JodaOrg/joda-time/issues/373) * Joda 2.9.4 (see https://github.com/JodaOrg/joda-time/issues/373)

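Both DST edge cases above hinge on the same fact: when clocks are turned back, the local midnight-to-midnight day spans 25 hours. A minimal standalone sketch (my own illustration, not part of this diff; it assumes Joda-Time on the classpath, as Elasticsearch of this era has) reproducing the 25-hour Azores day that the first assertion checks:

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

public class DstIntervalSketch {
    public static void main(String[] args) {
        // Atlantic/Azores turned clocks back one hour on 2000-10-29,
        // so that local day contains the 00:00-01:00 hour twice.
        DateTimeZone tz = DateTimeZone.forID("Atlantic/Azores");
        DateTime midnight = new DateTime(2000, 10, 29, 0, 0, tz);
        DateTime nextMidnight = new DateTime(2000, 10, 30, 0, 0, tz);
        long hours = (nextMidnight.getMillis() - midnight.getMillis()) / (3600 * 1000L);
        System.out.println(hours + "h"); // prints 25h
    }
}

Joda resolves the ambiguous 00:00 to the earlier (summer time) instant, which is exactly the behavior the rounding test relies on.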
View File

@ -122,7 +122,7 @@ import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.nullValue;
@ClusterScope(scope = Scope.TEST, numDataNodes = 0, transportClientRatio = 0) @ClusterScope(scope = Scope.TEST, numDataNodes = 0, transportClientRatio = 0, autoMinMasterNodes = false)
@TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE") @TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE")
public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {

View File

@ -34,7 +34,7 @@ import java.util.concurrent.ExecutionException;
import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.equalTo;
@ClusterScope(scope = Scope.TEST, numDataNodes = 0) @ClusterScope(scope = Scope.TEST, numDataNodes = 0, autoMinMasterNodes = false)
public class ZenUnicastDiscoveryIT extends ESIntegTestCase { public class ZenUnicastDiscoveryIT extends ESIntegTestCase {
private ClusterDiscoveryConfiguration discoveryConfig; private ClusterDiscoveryConfiguration discoveryConfig;

View File

@ -23,7 +23,6 @@ import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.client.Client; import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.zen.ElectMasterService;
import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.ESIntegTestCase.Scope;
@ -37,7 +36,7 @@ import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
@ClusterScope(numDataNodes =0, scope= Scope.TEST) @ClusterScope(numDataNodes = 0, scope = Scope.TEST)
public class QuorumGatewayIT extends ESIntegTestCase { public class QuorumGatewayIT extends ESIntegTestCase {
@Override @Override
protected int numberOfReplicas() { protected int numberOfReplicas() {
@ -47,8 +46,7 @@ public class QuorumGatewayIT extends ESIntegTestCase {
public void testQuorumRecovery() throws Exception { public void testQuorumRecovery() throws Exception {
logger.info("--> starting 3 nodes"); logger.info("--> starting 3 nodes");
// we are shutting down nodes - make sure we don't have 2 clusters if we test network // we are shutting down nodes - make sure we don't have 2 clusters if we test network
internalCluster().startNodesAsync(3, internalCluster().startNodesAsync(3).get();
Settings.builder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2).build()).get();
createIndex("test"); createIndex("test");

View File

@ -34,7 +34,7 @@ import java.util.Set;
import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasItem;
@ClusterScope(scope = Scope.TEST, numDataNodes = 0) @ClusterScope(scope = Scope.TEST, numDataNodes = 0, autoMinMasterNodes = false)
public class RecoverAfterNodesIT extends ESIntegTestCase { public class RecoverAfterNodesIT extends ESIntegTestCase {
private static final TimeValue BLOCK_WAIT_TIMEOUT = TimeValue.timeValueSeconds(10); private static final TimeValue BLOCK_WAIT_TIMEOUT = TimeValue.timeValueSeconds(10);

View File

@ -23,7 +23,6 @@ import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
import org.elasticsearch.action.admin.indices.stats.IndexStats; import org.elasticsearch.action.admin.indices.stats.IndexStats;
import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.admin.indices.stats.ShardStats;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode;
@ -44,6 +43,7 @@ import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.ESIntegTestCase.Scope;
import org.elasticsearch.test.InternalTestCluster;
import org.elasticsearch.test.InternalTestCluster.RestartCallback; import org.elasticsearch.test.InternalTestCluster.RestartCallback;
import org.elasticsearch.test.store.MockFSDirectoryService; import org.elasticsearch.test.store.MockFSDirectoryService;
import org.elasticsearch.test.store.MockFSIndexStore; import org.elasticsearch.test.store.MockFSIndexStore;
@ -333,48 +333,43 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase {
String metaDataUuid = client().admin().cluster().prepareState().execute().get().getState().getMetaData().clusterUUID(); String metaDataUuid = client().admin().cluster().prepareState().execute().get().getState().getMetaData().clusterUUID();
assertThat(metaDataUuid, not(equalTo("_na_"))); assertThat(metaDataUuid, not(equalTo("_na_")));
Map<String, long[]> primaryTerms = assertAndCapturePrimaryTerms(null);
logger.info("--> closing first node, and indexing more data to the second node"); logger.info("--> closing first node, and indexing more data to the second node");
internalCluster().fullRestart(new RestartCallback() { internalCluster().stopRandomDataNode();
@Override logger.info("--> one node is closed - start indexing data into the second one");
public void doAfterNodes(int numNodes, Client client) throws Exception { client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject().field("field", "value3").endObject()).execute().actionGet();
if (numNodes == 1) { // TODO: remove once refresh doesn't fail immediately if there is a master block:
logger.info("--> one node is closed - start indexing data into the second one"); // https://github.com/elastic/elasticsearch/issues/9997
client.prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject().field("field", "value3").endObject()).execute().actionGet(); client().admin().cluster().prepareHealth("test").setWaitForYellowStatus().get();
// TODO: remove once refresh doesn't fail immediately if there is a master block: client().admin().indices().prepareRefresh().execute().actionGet();
// https://github.com/elastic/elasticsearch/issues/9997
client.admin().cluster().prepareHealth("test").setWaitForYellowStatus().get();
client.admin().indices().prepareRefresh().execute().actionGet();
for (int i = 0; i < 10; i++) { for (int i = 0; i < 10; i++) {
assertHitCount(client.prepareSearch().setSize(0).setQuery(matchAllQuery()).execute().actionGet(), 3); assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).execute().actionGet(), 3);
} }
logger.info("--> add some metadata, additional type and template"); logger.info("--> add some metadata, additional type and template");
client.admin().indices().preparePutMapping("test").setType("type2") client().admin().indices().preparePutMapping("test").setType("type2")
.setSource(jsonBuilder().startObject().startObject("type2").endObject().endObject()) .setSource(jsonBuilder().startObject().startObject("type2").endObject().endObject())
.execute().actionGet(); .execute().actionGet();
client.admin().indices().preparePutTemplate("template_1") client().admin().indices().preparePutTemplate("template_1")
.setPatterns(Collections.singletonList("te*")) .setTemplate("te*")
.setOrder(0) .setOrder(0)
.addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
.startObject("field1").field("type", "text").field("store", true).endObject() .startObject("field1").field("type", "text").field("store", true).endObject()
.startObject("field2").field("type", "keyword").field("store", true).endObject() .startObject("field2").field("type", "keyword").field("store", true).endObject()
.endObject().endObject().endObject()) .endObject().endObject().endObject())
.execute().actionGet(); .execute().actionGet();
client.admin().indices().prepareAliases().addAlias("test", "test_alias", QueryBuilders.termQuery("field", "value")).execute().actionGet(); client().admin().indices().prepareAliases().addAlias("test", "test_alias", QueryBuilders.termQuery("field", "value")).execute().actionGet();
logger.info("--> starting two nodes back, verifying we got the latest version");
}
} logger.info("--> stopping the second node");
internalCluster().stopRandomDataNode();
}); logger.info("--> starting the two nodes back");
internalCluster().startNodesAsync(2, Settings.builder().put("gateway.recover_after_nodes", 2).build()).get();
logger.info("--> running cluster_health (wait for the shards to startup)"); logger.info("--> running cluster_health (wait for the shards to startup)");
ensureGreen(); ensureGreen();
primaryTerms = assertAndCapturePrimaryTerms(primaryTerms);
assertThat(client().admin().cluster().prepareState().execute().get().getState().getMetaData().clusterUUID(), equalTo(metaDataUuid)); assertThat(client().admin().cluster().prepareState().execute().get().getState().getMetaData().clusterUUID(), equalTo(metaDataUuid));
@ -502,27 +497,28 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase {
public void testRecoveryDifferentNodeOrderStartup() throws Exception { public void testRecoveryDifferentNodeOrderStartup() throws Exception {
// we need different data paths so we make sure we start the second node fresh // we need different data paths so we make sure we start the second node fresh
final String node_1 = internalCluster().startNode(Settings.builder().put(Environment.PATH_DATA_SETTING.getKey(), createTempDir()).build()); final Path pathNode1 = createTempDir();
final String node_1 = internalCluster().startNode(Settings.builder().put(Environment.PATH_DATA_SETTING.getKey(), pathNode1).build());
client().prepareIndex("test", "type1", "1").setSource("field", "value").execute().actionGet(); client().prepareIndex("test", "type1", "1").setSource("field", "value").execute().actionGet();
internalCluster().startNode(Settings.builder().put(Environment.PATH_DATA_SETTING.getKey(), createTempDir()).build()); final Path pathNode2 = createTempDir();
final String node_2 = internalCluster().startNode(Settings.builder().put(Environment.PATH_DATA_SETTING.getKey(), pathNode2).build());
ensureGreen(); ensureGreen();
Map<String, long[]> primaryTerms = assertAndCapturePrimaryTerms(null); Map<String, long[]> primaryTerms = assertAndCapturePrimaryTerms(null);
if (randomBoolean()) {
internalCluster().fullRestart(new RestartCallback() { internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node_1));
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node_2));
@Override } else {
public boolean doRestart(String nodeName) { internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node_2));
return !node_1.equals(nodeName); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node_1));
} }
}); // start the second node again
internalCluster().startNode(Settings.builder().put(Environment.PATH_DATA_SETTING.getKey(), pathNode2).build());
ensureYellow(); ensureYellow();
primaryTerms = assertAndCapturePrimaryTerms(primaryTerms); primaryTerms = assertAndCapturePrimaryTerms(primaryTerms);
assertThat(client().admin().indices().prepareExists("test").execute().actionGet().isExists(), equalTo(true)); assertThat(client().admin().indices().prepareExists("test").execute().actionGet().isExists(), equalTo(true));
assertHitCount(client().prepareSearch("test").setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet(), 1); assertHitCount(client().prepareSearch("test").setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet(), 1);
} }

View File

@ -73,11 +73,12 @@ public class DateFieldTypeTests extends FieldTypeTestCase {
} }
public void testIsFieldWithinQueryEmptyReader() throws IOException { public void testIsFieldWithinQueryEmptyReader() throws IOException {
QueryRewriteContext context = new QueryRewriteContext(null, null, null, null, null, null, null, () -> nowInMillis);
IndexReader reader = new MultiReader(); IndexReader reader = new MultiReader();
DateFieldType ft = new DateFieldType(); DateFieldType ft = new DateFieldType();
ft.setName("my_date"); ft.setName("my_date");
assertEquals(Relation.DISJOINT, ft.isFieldWithinQuery(reader, "2015-10-12", "2016-04-03", assertEquals(Relation.DISJOINT, ft.isFieldWithinQuery(reader, "2015-10-12", "2016-04-03",
randomBoolean(), randomBoolean(), null, null, null)); randomBoolean(), randomBoolean(), null, null, context));
} }
private void doTestIsFieldWithinQuery(DateFieldType ft, DirectoryReader reader, private void doTestIsFieldWithinQuery(DateFieldType ft, DirectoryReader reader,
@ -128,7 +129,9 @@ public class DateFieldTypeTests extends FieldTypeTestCase {
// Fields with no value indexed. // Fields with no value indexed.
DateFieldType ft2 = new DateFieldType(); DateFieldType ft2 = new DateFieldType();
ft2.setName("my_date2"); ft2.setName("my_date2");
assertEquals(Relation.DISJOINT, ft2.isFieldWithinQuery(reader, "2015-10-09", "2016-01-02", false, false, null, null, null));
QueryRewriteContext context = new QueryRewriteContext(null, null, null, null, null, null, null, () -> nowInMillis);
assertEquals(Relation.DISJOINT, ft2.isFieldWithinQuery(reader, "2015-10-09", "2016-01-02", false, false, null, null, context));
IOUtils.close(reader, w, dir); IOUtils.close(reader, w, dir);
} }

View File

@ -106,7 +106,7 @@ public class ExternalValuesMapperIntegrationIT extends ESIntegTestCase {
SearchResponse response; SearchResponse response;
response = client().prepareSearch("test-idx") response = client().prepareSearch("test-idx")
.setPostFilter(QueryBuilders.termQuery("field.bool", "T")) .setPostFilter(QueryBuilders.termQuery("field.bool", "true"))
.execute().actionGet(); .execute().actionGet();
assertThat(response.getHits().totalHits(), equalTo((long) 1)); assertThat(response.getHits().totalHits(), equalTo((long) 1));

View File

@ -0,0 +1,61 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.shard;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryCachingPolicy;
import org.apache.lucene.search.TermQuery;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
public class ElasticsearchQueryCachingPolicyTests extends ESTestCase {
public void testDoesNotCacheTermQueries() throws IOException {
QueryCachingPolicy policy = QueryCachingPolicy.ALWAYS_CACHE;
assertTrue(policy.shouldCache(new TermQuery(new Term("foo", "bar"))));
assertTrue(policy.shouldCache(new PhraseQuery("foo", "bar", "baz")));
policy = new ElasticsearchQueryCachingPolicy(policy);
assertFalse(policy.shouldCache(new TermQuery(new Term("foo", "bar"))));
assertTrue(policy.shouldCache(new PhraseQuery("foo", "bar", "baz")));
}
public void testDoesNotPutTermQueriesIntoTheHistory() {
boolean[] used = new boolean[1];
QueryCachingPolicy policy = new QueryCachingPolicy() {
@Override
public boolean shouldCache(Query query) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public void onUse(Query query) {
used[0] = true;
}
};
policy = new ElasticsearchQueryCachingPolicy(policy);
policy.onUse(new TermQuery(new Term("foo", "bar")));
assertFalse(used[0]);
policy.onUse(new PhraseQuery("foo", "bar", "baz"));
assertTrue(used[0]);
}
}

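For reference, the behavior these two tests pin down — delegate to a wrapped QueryCachingPolicy, but short-circuit term queries, which are cheap enough to re-execute that neither caching them nor letting them pollute the usage history pays off — can be sketched roughly as follows. This is my own minimal reading of the tests, not the production ElasticsearchQueryCachingPolicy, which may differ in detail:

import java.io.IOException;

import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryCachingPolicy;
import org.apache.lucene.search.TermQuery;

final class TermAwareCachingPolicy implements QueryCachingPolicy {
    private final QueryCachingPolicy in;

    TermAwareCachingPolicy(QueryCachingPolicy in) {
        this.in = in;
    }

    @Override
    public void onUse(Query query) {
        if ((query instanceof TermQuery) == false) {
            in.onUse(query); // term queries never reach the usage history
        }
    }

    @Override
    public boolean shouldCache(Query query) throws IOException {
        if (query instanceof TermQuery) {
            return false; // never cache term queries
        }
        return in.shouldCache(query); // everything else: the wrapped policy decides
    }
}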
View File

@ -555,10 +555,8 @@ public class IndexRecoveryIT extends ESIntegTestCase {
// start a master node // start a master node
internalCluster().startNode(nodeSettings); internalCluster().startNode(nodeSettings);
InternalTestCluster.Async<String> blueFuture = internalCluster().startNodeAsync(Settings.builder().put("node.attr.color", "blue").put(nodeSettings).build()); final String blueNodeName = internalCluster().startNode(Settings.builder().put("node.attr.color", "blue").put(nodeSettings).build());
InternalTestCluster.Async<String> redFuture = internalCluster().startNodeAsync(Settings.builder().put("node.attr.color", "red").put(nodeSettings).build()); final String redNodeName = internalCluster().startNode(Settings.builder().put("node.attr.color", "red").put(nodeSettings).build());
final String blueNodeName = blueFuture.get();
final String redNodeName = redFuture.get();
ClusterHealthResponse response = client().admin().cluster().prepareHealth().setWaitForNodes(">=3").get(); ClusterHealthResponse response = client().admin().cluster().prepareHealth().setWaitForNodes(">=3").get();
assertThat(response.isTimedOut(), is(false)); assertThat(response.isTimedOut(), is(false));

View File

@ -209,7 +209,10 @@ public class RareClusterStateIT extends ESIntegTestCase {
// but the change might not be on the node that performed the indexing // but the change might not be on the node that performed the indexing
// operation yet // operation yet
Settings settings = Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "0ms").build(); Settings settings = Settings.builder()
.put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "30s") // explicitly set so it won't default to publish timeout
.put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "0s") // don't wait post commit as we are blocking things by design
.build();
final List<String> nodeNames = internalCluster().startNodesAsync(2, settings).get(); final List<String> nodeNames = internalCluster().startNodesAsync(2, settings).get();
assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut()); assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut());
@ -327,7 +330,6 @@ public class RareClusterStateIT extends ESIntegTestCase {
// time of indexing it // time of indexing it
final List<String> nodeNames = internalCluster().startNodesAsync(2, final List<String> nodeNames = internalCluster().startNodesAsync(2,
Settings.builder() Settings.builder()
.put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2)
.put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "30s") // explicitly set so it won't default to publish timeout .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "30s") // explicitly set so it won't default to publish timeout
.put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "0s") // don't wait post commit as we are blocking things by design .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "0s") // don't wait post commit as we are blocking things by design
.build()).get(); .build()).get();

View File

@ -72,16 +72,15 @@ public class FullRollingRestartIT extends ESIntegTestCase {
} }
logger.info("--> now start adding nodes"); logger.info("--> now start adding nodes");
internalCluster().startNodesAsync(2, settings).get(); internalCluster().startNode(settings);
internalCluster().startNode(settings);
// make sure the cluster state is green, and all has been recovered // make sure the cluster state is green, and all has been recovered
assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("3")); assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("3"));
logger.info("--> add two more nodes"); logger.info("--> add two more nodes");
internalCluster().startNodesAsync(2, settings).get(); internalCluster().startNode(settings);
internalCluster().startNode(settings);
// We now have 5 nodes
setMinimumMasterNodes(3);
// make sure the cluster state is green, and all has been recovered // make sure the cluster state is green, and all has been recovered
assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("5")); assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("5"));
@ -97,9 +96,6 @@ public class FullRollingRestartIT extends ESIntegTestCase {
// make sure the cluster state is green, and all has been recovered // make sure the cluster state is green, and all has been recovered
assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("4")); assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("4"));
// going down to 3 nodes. note that min_master_nodes may not be in effect when we shut down the 4th
// node, but that's OK as it was set to 3 before.
setMinimumMasterNodes(2);
internalCluster().stopRandomDataNode(); internalCluster().stopRandomDataNode();
// make sure the cluster state is green, and all has been recovered // make sure the cluster state is green, and all has been recovered
assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("3")); assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("3"));
@ -115,8 +111,6 @@ public class FullRollingRestartIT extends ESIntegTestCase {
// make sure the cluster state is green, and all has been recovered // make sure the cluster state is green, and all has been recovered
assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("2")); assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("2"));
// closing the 2nd node
setMinimumMasterNodes(1);
internalCluster().stopRandomDataNode(); internalCluster().stopRandomDataNode();
// make sure the cluster state is yellow, and all has been recovered // make sure the cluster state is yellow, and all has been recovered

View File

@ -44,8 +44,8 @@ import org.elasticsearch.index.shard.IndexEventListener;
import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest;
import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest;
import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchHits;
@ -53,7 +53,6 @@ import org.elasticsearch.test.BackgroundIndexer;
import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.ESIntegTestCase.Scope;
import org.elasticsearch.test.InternalTestCluster;
import org.elasticsearch.test.MockIndexEventListener; import org.elasticsearch.test.MockIndexEventListener;
import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.test.transport.MockTransportService;
@ -77,6 +76,7 @@ import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutionException;
import java.util.concurrent.Semaphore; import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import java.util.stream.Stream;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
@ -351,7 +351,8 @@ public class RelocationIT extends ESIntegTestCase {
client().admin().indices().prepareCreate(indexName) client().admin().indices().prepareCreate(indexName)
.setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1, IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)).get(); .setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1, IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)).get();
internalCluster().startNodesAsync(2).get(); internalCluster().startNode();
internalCluster().startNode();
List<IndexRequestBuilder> requests = new ArrayList<>(); List<IndexRequestBuilder> requests = new ArrayList<>();
int numDocs = scaledRandomIntBetween(25, 250); int numDocs = scaledRandomIntBetween(25, 250);
@ -424,14 +425,15 @@ public class RelocationIT extends ESIntegTestCase {
public void testIndexAndRelocateConcurrently() throws ExecutionException, InterruptedException { public void testIndexAndRelocateConcurrently() throws ExecutionException, InterruptedException {
int halfNodes = randomIntBetween(1, 3); int halfNodes = randomIntBetween(1, 3);
Settings blueSetting = Settings.builder().put("node.attr.color", "blue").build(); Settings[] nodeSettings = Stream.concat(
InternalTestCluster.Async<List<String>> blueFuture = internalCluster().startNodesAsync(halfNodes, blueSetting); Stream.generate(() -> Settings.builder().put("node.attr.color", "blue").build()).limit(halfNodes),
Settings redSetting = Settings.builder().put("node.attr.color", "red").build(); Stream.generate(() -> Settings.builder().put("node.attr.color", "red").build()).limit(halfNodes)
InternalTestCluster.Async<java.util.List<String>> redFuture = internalCluster().startNodesAsync(halfNodes, redSetting); ).toArray(Settings[]::new);
blueFuture.get(); List<String> nodes = internalCluster().startNodesAsync(nodeSettings).get();
redFuture.get(); String[] blueNodes = nodes.subList(0, halfNodes).stream().toArray(String[]::new);
logger.info("blue nodes: {}", blueFuture.get()); String[] redNodes = nodes.subList(halfNodes, nodes.size()).stream().toArray(String[]::new);
logger.info("red nodes: {}", redFuture.get()); logger.info("blue nodes: {}", (Object)blueNodes);
logger.info("red nodes: {}", (Object)redNodes);
ensureStableCluster(halfNodes * 2); ensureStableCluster(halfNodes * 2);
assertAcked(prepareCreate("test").setSettings(Settings.builder() assertAcked(prepareCreate("test").setSettings(Settings.builder()
@ -439,7 +441,7 @@ public class RelocationIT extends ESIntegTestCase {
.put(indexSettings()) .put(indexSettings())
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomInt(halfNodes - 1)) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomInt(halfNodes - 1))
)); ));
assertAllShardsOnNodes("test", redFuture.get().toArray(new String[2])); assertAllShardsOnNodes("test", redNodes);
int numDocs = randomIntBetween(100, 150); int numDocs = randomIntBetween(100, 150);
ArrayList<String> ids = new ArrayList<>(); ArrayList<String> ids = new ArrayList<>();
logger.info(" --> indexing [{}] docs", numDocs); logger.info(" --> indexing [{}] docs", numDocs);

View File

@ -0,0 +1,144 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.rest.action.admin.cluster;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.rest.FakeRestRequest;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Set;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.object.HasToString.hasToString;
import static org.mockito.Mockito.mock;
public class RestNodesStatsActionTests extends ESTestCase {
private RestNodesStatsAction action;
@Override
public void setUp() throws Exception {
super.setUp();
action = new RestNodesStatsAction(Settings.EMPTY, new RestController(Settings.EMPTY, Collections.emptySet()));
}
public void testUnrecognizedMetric() throws IOException {
final HashMap<String, String> params = new HashMap<>();
final String metric = randomAsciiOfLength(64);
params.put("metric", metric);
final RestRequest request = new FakeRestRequest.Builder().withPath("/_nodes/stats").withParams(params).build();
final IllegalArgumentException e = expectThrows(
IllegalArgumentException.class,
() -> action.prepareRequest(request, mock(NodeClient.class)));
assertThat(e, hasToString(containsString("request [/_nodes/stats] contains unrecognized metric: [" + metric + "]")));
}
public void testUnrecognizedMetricDidYouMean() throws IOException {
final HashMap<String, String> params = new HashMap<>();
params.put("metric", "os,transprot,unrecognized");
final RestRequest request = new FakeRestRequest.Builder().withPath("/_nodes/stats").withParams(params).build();
final IllegalArgumentException e = expectThrows(
IllegalArgumentException.class,
() -> action.prepareRequest(request, mock(NodeClient.class)));
assertThat(
e,
hasToString(
containsString(
"request [/_nodes/stats] contains unrecognized metrics: [transprot] -> did you mean [transport]?, [unrecognized]")));
}
public void testAllRequestWithOtherMetrics() throws IOException {
final HashMap<String, String> params = new HashMap<>();
final String metric = randomSubsetOf(1, RestNodesStatsAction.METRICS.keySet()).get(0);
params.put("metric", "_all," + metric);
final RestRequest request = new FakeRestRequest.Builder().withPath("/_nodes/stats").withParams(params).build();
final IllegalArgumentException e = expectThrows(
IllegalArgumentException.class,
() -> action.prepareRequest(request, mock(NodeClient.class)));
assertThat(e, hasToString(containsString("request [/_nodes/stats] contains _all and individual metrics [_all," + metric + "]")));
}
public void testUnrecognizedIndexMetric() {
final HashMap<String, String> params = new HashMap<>();
params.put("metric", "indices");
final String indexMetric = randomAsciiOfLength(64);
params.put("index_metric", indexMetric);
final RestRequest request = new FakeRestRequest.Builder().withPath("/_nodes/stats").withParams(params).build();
final IllegalArgumentException e = expectThrows(
IllegalArgumentException.class,
() -> action.prepareRequest(request, mock(NodeClient.class)));
assertThat(e, hasToString(containsString("request [/_nodes/stats] contains unrecognized index metric: [" + indexMetric + "]")));
}
public void testUnrecognizedIndexMetricDidYouMean() {
final HashMap<String, String> params = new HashMap<>();
params.put("metric", "indices");
params.put("index_metric", "indexing,stroe,unrecognized");
final RestRequest request = new FakeRestRequest.Builder().withPath("/_nodes/stats").withParams(params).build();
final IllegalArgumentException e = expectThrows(
IllegalArgumentException.class,
() -> action.prepareRequest(request, mock(NodeClient.class)));
assertThat(
e,
hasToString(
containsString(
"request [/_nodes/stats] contains unrecognized index metrics: [stroe] -> did you mean [store]?, [unrecognized]")));
}
public void testIndexMetricsRequestWithoutIndicesMetric() throws IOException {
final HashMap<String, String> params = new HashMap<>();
final Set<String> metrics = new HashSet<>(RestNodesStatsAction.METRICS.keySet());
metrics.remove("indices");
params.put("metric", randomSubsetOf(1, metrics).get(0));
final String indexMetric = randomSubsetOf(1, RestNodesStatsAction.FLAGS.keySet()).get(0);
params.put("index_metric", indexMetric);
final RestRequest request = new FakeRestRequest.Builder().withPath("/_nodes/stats").withParams(params).build();
final IllegalArgumentException e = expectThrows(
IllegalArgumentException.class,
() -> action.prepareRequest(request, mock(NodeClient.class)));
assertThat(
e,
hasToString(
containsString("request [/_nodes/stats] contains index metrics [" + indexMetric + "] but indices stats not requested")));
}
public void testIndexMetricsRequestOnAllRequest() throws IOException {
final HashMap<String, String> params = new HashMap<>();
params.put("metric", "_all");
final String indexMetric = randomSubsetOf(1, RestNodesStatsAction.FLAGS.keySet()).get(0);
params.put("index_metric", indexMetric);
final RestRequest request = new FakeRestRequest.Builder().withPath("/_nodes/stats").withParams(params).build();
final IllegalArgumentException e = expectThrows(
IllegalArgumentException.class,
() -> action.prepareRequest(request, mock(NodeClient.class)));
assertThat(
e,
hasToString(
containsString("request [/_nodes/stats] contains index metrics [" + indexMetric + "] but all stats requested")));
}
}

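The "did you mean" messages asserted above imply some edit-distance matching of the bogus metric name against the known ones. As a rough illustration only (plain Levenshtein distance; the suggestion logic in the actual REST action may use different scoring), the mechanism boils down to:

import java.util.ArrayList;
import java.util.List;
import java.util.Set;

final class MetricSuggester {
    /** Return known metrics within maxDistance edits of the unrecognized input. */
    static List<String> suggest(String input, Set<String> known, int maxDistance) {
        List<String> out = new ArrayList<>();
        for (String candidate : known) {
            if (levenshtein(input, candidate) <= maxDistance) {
                out.add(candidate);
            }
        }
        return out;
    }

    /** Classic two-row dynamic-programming Levenshtein distance. */
    static int levenshtein(String a, String b) {
        int[] prev = new int[b.length() + 1];
        int[] curr = new int[b.length() + 1];
        for (int j = 0; j <= b.length(); j++) {
            prev[j] = j;
        }
        for (int i = 1; i <= a.length(); i++) {
            curr[0] = i;
            for (int j = 1; j <= b.length(); j++) {
                int cost = a.charAt(i - 1) == b.charAt(j - 1) ? 0 : 1;
                curr[j] = Math.min(Math.min(curr[j - 1] + 1, prev[j] + 1), prev[j - 1] + cost);
            }
            int[] tmp = prev; prev = curr; curr = tmp;
        }
        return prev[b.length()];
    }
}

With a max distance of 2, suggest("transprot", knownMetrics, 2) would yield [transport], matching the message the test expects.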
View File

@ -0,0 +1,83 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.rest.action.admin.indices;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.rest.FakeRestRequest;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.object.HasToString.hasToString;
import static org.mockito.Mockito.mock;
public class RestIndicesStatsActionTests extends ESTestCase {
private RestIndicesStatsAction action;
@Override
public void setUp() throws Exception {
super.setUp();
action = new RestIndicesStatsAction(Settings.EMPTY, new RestController(Settings.EMPTY, Collections.emptySet()));
}
public void testUnrecognizedMetric() throws IOException {
final HashMap<String, String> params = new HashMap<>();
final String metric = randomAsciiOfLength(64);
params.put("metric", metric);
final RestRequest request = new FakeRestRequest.Builder().withPath("/_stats").withParams(params).build();
final IllegalArgumentException e = expectThrows(
IllegalArgumentException.class,
() -> action.prepareRequest(request, mock(NodeClient.class)));
assertThat(e, hasToString(containsString("request [/_stats] contains unrecognized metric: [" + metric + "]")));
}
public void testUnrecognizedMetricDidYouMean() throws IOException {
final HashMap<String, String> params = new HashMap<>();
params.put("metric", "request_cache,fieldata,unrecognized");
final RestRequest request = new FakeRestRequest.Builder().withPath("/_stats").withParams(params).build();
final IllegalArgumentException e = expectThrows(
IllegalArgumentException.class,
() -> action.prepareRequest(request, mock(NodeClient.class)));
assertThat(
e,
hasToString(
containsString(
"request [/_stats] contains unrecognized metrics: [fieldata] -> did you mean [fielddata]?, [unrecognized]")));
}
public void testAllRequestWithOtherMetrics() throws IOException {
final HashMap<String, String> params = new HashMap<>();
final String metric = randomSubsetOf(1, RestIndicesStatsAction.METRICS.keySet()).get(0);
params.put("metric", "_all," + metric);
final RestRequest request = new FakeRestRequest.Builder().withPath("/_stats").withParams(params).build();
final IllegalArgumentException e = expectThrows(
IllegalArgumentException.class,
() -> action.prepareRequest(request, mock(NodeClient.class)));
assertThat(e, hasToString(containsString("request [/_stats] contains _all and individual metrics [_all," + metric + "]")));
}
}

View File

@ -247,6 +247,14 @@ public class QueryStringIT extends ESIntegTestCase {
assertHitCount(resp, 1L); assertHitCount(resp, 1L);
} }
public void testBooleanStrictQuery() throws Exception {
Exception e = expectThrows(Exception.class, () ->
client().prepareSearch("test").setQuery(
queryStringQuery("foo").field("f_bool")).get());
assertThat(ExceptionsHelper.detailedMessage(e),
containsString("Can't parse boolean value [foo], expected [true] or [false]"));
}
private void assertHits(SearchHits hits, String... ids) { private void assertHits(SearchHits hits, String... ids) {
assertThat(hits.totalHits(), equalTo((long) ids.length)); assertThat(hits.totalHits(), equalTo((long) ids.length));
Set<String> hitIds = new HashSet<>(); Set<String> hitIds = new HashSet<>();

View File

@ -1904,7 +1904,6 @@ public class SearchQueryIT extends ESIntegTestCase {
assertHitCount(client().prepareSearch("test").setSize(0).setQuery(rangeQuery("field").lte(-999999999999L)).get(), 3); assertHitCount(client().prepareSearch("test").setSize(0).setQuery(rangeQuery("field").lte(-999999999999L)).get(), 3);
} }
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/21501")
public void testRangeQueryWithTimeZone() throws Exception { public void testRangeQueryWithTimeZone() throws Exception {
assertAcked(prepareCreate("test") assertAcked(prepareCreate("test")
.addMapping("type1", "date", "type=date", "num", "type=integer")); .addMapping("type1", "date", "type=date", "num", "type=integer"));

View File

@ -19,18 +19,6 @@
package org.elasticsearch.tribe; package org.elasticsearch.tribe;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.support.DestructiveOperations; import org.elasticsearch.action.support.DestructiveOperations;
import org.elasticsearch.client.Client; import org.elasticsearch.client.Client;
@ -58,6 +46,18 @@ import org.junit.After;
import org.junit.AfterClass; import org.junit.AfterClass;
import org.junit.Before; import org.junit.Before;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
import static java.util.stream.Collectors.toSet; import static java.util.stream.Collectors.toSet;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
@ -121,13 +121,13 @@ public class TribeIT extends ESIntegTestCase {
final Collection<Class<? extends Plugin>> plugins = nodePlugins(); final Collection<Class<? extends Plugin>> plugins = nodePlugins();
if (cluster1 == null) { if (cluster1 == null) {
cluster1 = new InternalTestCluster(randomLong(), createTempDir(), true, minNumDataNodes, maxNumDataNodes, cluster1 = new InternalTestCluster(randomLong(), createTempDir(), true, true, minNumDataNodes, maxNumDataNodes,
UUIDs.randomBase64UUID(random()), nodeConfigurationSource, 0, false, "cluster_1", UUIDs.randomBase64UUID(random()), nodeConfigurationSource, 0, false, "cluster_1",
plugins, Function.identity()); plugins, Function.identity());
} }
if (cluster2 == null) { if (cluster2 == null) {
cluster2 = new InternalTestCluster(randomLong(), createTempDir(), true, minNumDataNodes, maxNumDataNodes, cluster2 = new InternalTestCluster(randomLong(), createTempDir(), true, true, minNumDataNodes, maxNumDataNodes,
UUIDs.randomBase64UUID(random()), nodeConfigurationSource, 0, false, "cluster_2", UUIDs.randomBase64UUID(random()), nodeConfigurationSource, 0, false, "cluster_2",
plugins, Function.identity()); plugins, Function.identity());
} }

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1 @@
5f01da7306363fad2028b916f3eab926262de928

View File

@ -171,6 +171,10 @@ integTest {
} }
configFile 'scripts/my_script.js' configFile 'scripts/my_script.js'
configFile 'scripts/my_script.py' configFile 'scripts/my_script.py'
configFile 'scripts/my_init_script.painless'
configFile 'scripts/my_map_script.painless'
configFile 'scripts/my_combine_script.painless'
configFile 'scripts/my_reduce_script.painless'
configFile 'userdict_ja.txt' configFile 'userdict_ja.txt'
configFile 'KeywordTokenizer.rbbi' configFile 'KeywordTokenizer.rbbi'
// Whitelist reindexing from the local node so we can test it. // Whitelist reindexing from the local node so we can test it.
@ -249,6 +253,39 @@ buildRestTests.setups['host'] = '''
- set: {nodes.$master.http.publish_address: host} - set: {nodes.$master.http.publish_address: host}
''' '''
// Used by scripted metric docs
buildRestTests.setups['ledger'] = '''
- do:
indices.create:
index: ledger
body:
settings:
number_of_shards: 2
number_of_replicas: 1
mappings:
sale:
properties:
type:
type: keyword
amount:
type: double
- do:
bulk:
index: ledger
type: item
refresh: true
body: |
{"index":{}}
{"date": "2015/01/01 00:00:00", "amount": 200, "type": "sale", "description": "something"}
{"index":{}}
{"date": "2015/01/01 00:00:00", "amount": 10, "type": "expense", "decription": "another thing"}
{"index":{}}
{"date": "2015/01/01 00:00:00", "amount": 150, "type": "sale", "description": "blah"}
{"index":{}}
{"date": "2015/01/01 00:00:00", "amount": 50, "type": "expense", "description": "cost of blah"}
{"index":{}}
{"date": "2015/01/01 00:00:00", "amount": 50, "type": "expense", "description": "advertisement"}'''
// Used by pipeline aggregation docs // Used by pipeline aggregation docs
buildRestTests.setups['sales'] = ''' buildRestTests.setups['sales'] = '''
- do: - do:

View File

@ -196,6 +196,17 @@ second document falls into the bucket for 1 October 2015:
<1> The `key_as_string` value represents midnight on each day <1> The `key_as_string` value represents midnight on each day
in the specified time zone. in the specified time zone.
WARNING: When using time zones that follow DST (daylight savings time) changes,
buckets close to the moment when those changes happen can have slightly different
sizes than the specified `interval` would suggest.
For example, consider a DST start in the `CET` time zone: on 27 March 2016 at 2am,
clocks were turned forward 1 hour to 3am local time. When using `day` as the `interval`,
the bucket covering that day will only hold data for 23 hours instead of the usual
24 hours for other buckets. The same is true for shorter intervals such as 12h:
here we will have only an 11h bucket for the morning of 27 March, when the DST shift
happens.
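
To see where the 23-hour figure comes from, `java.time` can report the length of that day directly. A minimal check, relying only on the JDK's own time-zone data for `CET`:

[source,java]
--------------------------------------------------
import java.time.Duration;
import java.time.LocalDate;
import java.time.ZoneId;
import java.time.ZonedDateTime;

public class DstBucketLength {
    public static void main(String[] args) {
        ZoneId cet = ZoneId.of("CET");
        ZonedDateTime startOfDay = LocalDate.of(2016, 3, 27).atStartOfDay(cet);
        ZonedDateTime startOfNextDay = LocalDate.of(2016, 3, 28).atStartOfDay(cet);
        // prints PT23H: the day bucket for 27 March 2016 spans only 23 hours
        System.out.println(Duration.between(startOfDay, startOfNextDay));
    }
}
--------------------------------------------------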
==== Offset ==== Offset
The `offset` parameter is used to change the start value of each bucket by the The `offset` parameter is used to change the start value of each bucket by the

View File

@ -9,6 +9,7 @@ Example:
[source,js] [source,js]
-------------------------------------------------- --------------------------------------------------
POST ledger/_search?size=0
{ {
"query" : { "query" : {
"match_all" : {} "match_all" : {}
@ -16,15 +17,17 @@ Example:
"aggs": { "aggs": {
"profit": { "profit": {
"scripted_metric": { "scripted_metric": {
"init_script" : "_agg['transactions'] = []", "init_script" : "params._agg.transactions = []",
"map_script" : "if (doc['type'].value == \"sale\") { _agg.transactions.add(doc['amount'].value) } else { _agg.transactions.add(-1 * doc['amount'].value) }", <1> "map_script" : "params._agg.transactions.add(doc.type.value == 'sale' ? doc.amount.value : -1 * doc.amount.value)", <1>
"combine_script" : "profit = 0; for (t in _agg.transactions) { profit += t }; return profit", "combine_script" : "double profit = 0; for (t in params._agg.transactions) { profit += t } return profit",
"reduce_script" : "profit = 0; for (a in _aggs) { profit += a }; return profit" "reduce_script" : "double profit = 0; for (a in params._aggs) { profit += a } return profit"
} }
} }
} }
} }
-------------------------------------------------- --------------------------------------------------
// CONSOLE
// TEST[setup:ledger]
<1> `map_script` is the only required parameter <1> `map_script` is the only required parameter
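
To make the data flow concrete, the following plain-Java sketch simulates the init/map/combine/reduce phases over the five `ledger` documents from the setup above. The two-shard split is arbitrary, and the code is only a model of what the painless scripts compute, not how Elasticsearch executes them.

[source,java]
--------------------------------------------------
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class ScriptedMetricSimulation {

    static final class Doc {
        final String type;
        final double amount;
        Doc(String type, double amount) {
            this.type = type;
            this.amount = amount;
        }
    }

    /** init_script + map_script + combine_script for a single shard. */
    static double combineShard(List<Doc> shard) {
        List<Double> transactions = new ArrayList<>();   // init: params._agg.transactions = []
        for (Doc doc : shard) {                          // map: signed amount per document
            transactions.add("sale".equals(doc.type) ? doc.amount : -doc.amount);
        }
        double profit = 0;                               // combine: sum shard-local transactions
        for (double t : transactions) {
            profit += t;
        }
        return profit;
    }

    public static void main(String[] args) {
        // the five ledger documents, arbitrarily split across two shards
        List<Doc> shardA = Arrays.asList(new Doc("sale", 200), new Doc("expense", 10), new Doc("sale", 150));
        List<Doc> shardB = Arrays.asList(new Doc("expense", 50), new Doc("expense", 50));
        double profit = 0;                               // reduce: sum the per-shard results
        for (double shardProfit : Arrays.asList(combineShard(shardA), combineShard(shardB))) {
            profit += shardProfit;
        }
        System.out.println(profit);                      // 240.0
    }
}
--------------------------------------------------

Running it prints `240.0`, matching the `profit.value` shown in the response below.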
@ -35,24 +38,24 @@ The response for the above aggregation:
[source,js] [source,js]
-------------------------------------------------- --------------------------------------------------
{ {
"took": 218,
... ...
"aggregations": { "aggregations": {
"profit": { "profit": {
"value": 170 "value": 240.0
} }
} }
} }
-------------------------------------------------- --------------------------------------------------
// TESTRESPONSE[s/"took": 218/"took": $body.took/]
// TESTRESPONSE[s/\.\.\./"_shards": $body._shards, "hits": $body.hits, "timed_out": false,/]
The above example can also be specified using file scripts as follows: The above example can also be specified using file scripts as follows:
[source,js] [source,js]
-------------------------------------------------- --------------------------------------------------
POST ledger/_search?size=0
{ {
"query" : {
"match_all" : {}
},
"aggs": { "aggs": {
"profit": { "profit": {
"scripted_metric": { "scripted_metric": {
@ -66,18 +69,42 @@ The above example can also be specified using file scripts as follows:
"file": "my_combine_script" "file": "my_combine_script"
}, },
"params": { "params": {
"field": "amount" <1> "field": "amount", <1>
"_agg": {} <2>
}, },
"reduce_script" : { "reduce_script" : {
"file": "my_reduce_script" "file": "my_reduce_script"
}, }
} }
} }
} }
} }
-------------------------------------------------- --------------------------------------------------
// CONSOLE
// TEST[setup:ledger]
<1> script parameters for init, map and combine scripts must be specified in a global `params` object so that it can be share between the scripts <1> script parameters for `init`, `map` and `combine` scripts must be specified
in a global `params` object so that they can be shared between the scripts.
<2> if you specify script parameters then you must specify `"_agg": {}`.
////
Verify this response as well but in a hidden block.
[source,js]
--------------------------------------------------
{
"took": 218,
...
"aggregations": {
"profit": {
"value": 240.0
}
}
}
--------------------------------------------------
// TESTRESPONSE[s/"took": 218/"took": $body.took/]
// TESTRESPONSE[s/\.\.\./"_shards": $body._shards, "hits": $body.hits, "timed_out": false,/]
////
For more details on specifying scripts see <<modules-scripting, script documentation>>. For more details on specifying scripts see <<modules-scripting, script documentation>>.
@ -88,7 +115,7 @@ Whilst any valid script object can be used within a single script, the scripts m
* primitive types * primitive types
* String * String
* Map (containing only keys and values of the types listed here) * Map (containing only keys and values of the types listed here)
* Array (containing elements of only the types listed here) * Array (containing elements of only the types listed here)
==== Scope of scripts ==== Scope of scripts
@ -98,24 +125,24 @@ init_script:: Executed prior to any collection of documents. Allows the ag
+ +
In the above example, the `init_script` creates an array `transactions` in the `_agg` object. In the above example, the `init_script` creates an array `transactions` in the `_agg` object.
map_script:: Executed once per document collected. This is the only required script. If no combine_script is specified, the resulting state map_script:: Executed once per document collected. This is the only required script. If no combine_script is specified, the resulting state
needs to be stored in an object named `_agg`. needs to be stored in an object named `_agg`.
+ +
In the above example, the `map_script` checks the value of the type field. If the value is 'sale' the value of the amount field In the above example, the `map_script` checks the value of the type field. If the value is 'sale' the value of the amount field
is added to the transactions array. If the value of the type field is not 'sale' the negated value of the amount field is added is added to the transactions array. If the value of the type field is not 'sale' the negated value of the amount field is added
to transactions. to transactions.
combine_script:: Executed once on each shard after document collection is complete. Allows the aggregation to consolidate the state returned from combine_script:: Executed once on each shard after document collection is complete. Allows the aggregation to consolidate the state returned from
each shard. If a combine_script is not provided the combine phase will return the aggregation variable. each shard. If a combine_script is not provided the combine phase will return the aggregation variable.
+ +
In the above example, the `combine_script` iterates through all the stored transactions, summing the values in the `profit` variable In the above example, the `combine_script` iterates through all the stored transactions, summing the values in the `profit` variable
and finally returns `profit`. and finally returns `profit`.
reduce_script:: Executed once on the coordinating node after all shards have returned their results. The script is provided with access to a reduce_script:: Executed once on the coordinating node after all shards have returned their results. The script is provided with access to a
variable `_aggs` which is an array of the result of the combine_script on each shard. If a reduce_script is not provided variable `_aggs` which is an array of the result of the combine_script on each shard. If a reduce_script is not provided
the reduce phase will return the `_aggs` variable. the reduce phase will return the `_aggs` variable.
+ +
In the above example, the `reduce_script` iterates through the `profit` returned by each shard summing the values before returning the In the above example, the `reduce_script` iterates through the `profit` returned by each shard summing the values before returning the
final combined profit which will be returned in the response of the aggregation. final combined profit which will be returned in the response of the aggregation.
==== Worked Example ==== Worked Example
@ -124,36 +151,19 @@ Imagine a situation where you index the following documents into an index with
[source,js] [source,js]
-------------------------------------------------- --------------------------------------------------
$ curl -XPUT 'http://localhost:9200/transactions/stock/1' -d ' PUT /transactions/stock/_bulk?refresh
{ {"index":{"_id":1}}
"type": "sale", {"type": "sale","amount": 80}
"amount": 80 {"index":{"_id":2}}
} {"type": "cost","amount": 10}
' {"index":{"_id":2}}
{"type": "cost","amount": 30}
$ curl -XPUT 'http://localhost:9200/transactions/stock/2' -d ' {"index":{"_id":4}}
{ {"type": "sale","amount": 130}
"type": "cost",
"amount": 10
}
'
$ curl -XPUT 'http://localhost:9200/transactions/stock/3' -d '
{
"type": "cost",
"amount": 30
}
'
$ curl -XPUT 'http://localhost:9200/transactions/stock/4' -d '
{
"type": "sale",
"amount": 130
}
'
-------------------------------------------------- --------------------------------------------------
// CONSOLE
Let's say that documents 1 and 3 end up on shard A and documents 2 and 4 end up on shard B. The following is a breakdown of what the aggregation result is Let's say that documents 1 and 3 end up on shard A and documents 2 and 4 end up on shard B. The following is a breakdown of what the aggregation result is
at each stage of the example above. at each stage of the example above.
===== Before init_script ===== Before init_script
@ -221,7 +231,7 @@ Shard B::
===== After combine_script ===== After combine_script
The combine_script is executed on each shard after document collection is complete and reduces all the transactions down to a single profit figure for each The combine_script is executed on each shard after document collection is complete and reduces all the transactions down to a single profit figure for each
shard (by summing the values in the transactions array) which is passed back to the coordinating node: shard (by summing the values in the transactions array) which is passed back to the coordinating node:
Shard A:: 50 Shard A:: 50
@ -239,7 +249,7 @@ The reduce_script receives an `_aggs` array containing the result of the combine
] ]
-------------------------------------------------- --------------------------------------------------
It reduces the responses for the shards down to a final overall profit figure (by summing the values) and returns this as the result of the aggregation to It reduces the responses for the shards down to a final overall profit figure (by summing the values) and returns this as the result of the aggregation to
produce the response: produce the response:
[source,js] [source,js]
@ -258,8 +268,8 @@ produce the response:
==== Other Parameters ==== Other Parameters
[horizontal] [horizontal]
params:: Optional. An object whose contents will be passed as variables to the `init_script`, `map_script` and `combine_script`. This can be params:: Optional. An object whose contents will be passed as variables to the `init_script`, `map_script` and `combine_script`. This can be
useful to allow the user to control the behavior of the aggregation and for storing state between the scripts. If this is not specified, useful to allow the user to control the behavior of the aggregation and for storing state between the scripts. If this is not specified,
the default is the equivalent of providing: the default is the equivalent of providing:
+ +
[source,js] [source,js]
@ -268,4 +278,3 @@ params:: Optional. An object whose contents will be passed as variable
"_agg" : {} "_agg" : {}
} }
-------------------------------------------------- --------------------------------------------------

View File

@ -10,12 +10,7 @@ GET /_cat/indices/twi*?v&s=index
-------------------------------------------------- --------------------------------------------------
// CONSOLE // CONSOLE
// TEST[setup:huge_twitter] // TEST[setup:huge_twitter]
// TEST[s/^/POST _flush\n/]
// TEST[s/^/PUT twitter2\n{"settings": {"number_of_replicas": 0}}\n/] // TEST[s/^/PUT twitter2\n{"settings": {"number_of_replicas": 0}}\n/]
// We flush very early here because the index's size is cached and we sort on
// size below. So to get a realistic sort on size we need to flush here or else
// the size is just whatever portion of the index is pushed out of memory
// during test setup which isn't deterministic.
Might respond with: Might respond with:
@ -64,11 +59,11 @@ yellow open twitter u8FNjxh8Rfy_awN11oDKYQ 1 1 1200 0
// TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/] // TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/]
// TESTRESPONSE[s/u8FNjxh8Rfy_awN11oDKYQ/.+/ _cat] // TESTRESPONSE[s/u8FNjxh8Rfy_awN11oDKYQ/.+/ _cat]
What's my largest index by disk usage not including replicas? Which index has the largest number of documents?
[source,js] [source,js]
-------------------------------------------------- --------------------------------------------------
GET /_cat/indices?v&s=store.size:desc GET /_cat/indices?v&s=docs.count:desc
-------------------------------------------------- --------------------------------------------------
// CONSOLE // CONSOLE
// TEST[continued] // TEST[continued]

View File

@ -65,12 +65,11 @@ of `indices`, `os`, `process`, `jvm`, `transport`, `http`,
[source,js] [source,js]
-------------------------------------------------- --------------------------------------------------
# return indices and os # return just indices
curl -XGET 'http://localhost:9200/_nodes/stats/os' curl -XGET 'http://localhost:9200/_nodes/stats/indices'
# return just os and process # return just os and process
curl -XGET 'http://localhost:9200/_nodes/stats/os,process' curl -XGET 'http://localhost:9200/_nodes/stats/os,process'
# specific type endpoint # return just process for node with IP address 10.0.0.1
curl -XGET 'http://localhost:9200/_nodes/stats/process'
curl -XGET 'http://localhost:9200/_nodes/10.0.0.1/stats/process' curl -XGET 'http://localhost:9200/_nodes/10.0.0.1/stats/process'
-------------------------------------------------- --------------------------------------------------
@ -280,27 +279,45 @@ the current running process:
`process.mem.total_virtual_in_bytes`:: `process.mem.total_virtual_in_bytes`::
Size in bytes of virtual memory that is guaranteed to be available to the running process Size in bytes of virtual memory that is guaranteed to be available to the running process
[float] [float]
[[field-data]] [[node-indices-stats]]
=== Field data statistics === Indices statistics
You can get information about field data memory usage on node You can get information about indices stats on node level or on index level.
level or on index level.
[source,js] [source,js]
-------------------------------------------------- --------------------------------------------------
# Node Stats # Node level
curl -XGET 'http://localhost:9200/_nodes/stats/indices/?fields=field1,field2&pretty' curl -XGET 'http://localhost:9200/_nodes/stats/indices/fielddata?fields=field1,field2&pretty'
# Indices Stat # Index level
curl -XGET 'http://localhost:9200/_stats/fielddata/?fields=field1,field2&pretty' curl -XGET 'http://localhost:9200/_stats/fielddata/?fields=field1,field2&pretty'
# You can use wildcards for field names # You can use wildcards for field names
curl -XGET 'http://localhost:9200/_nodes/stats/indices/fielddata?fields=field*&pretty'
curl -XGET 'http://localhost:9200/_stats/fielddata/?fields=field*&pretty' curl -XGET 'http://localhost:9200/_stats/fielddata/?fields=field*&pretty'
curl -XGET 'http://localhost:9200/_nodes/stats/indices/?fields=field*&pretty'
-------------------------------------------------- --------------------------------------------------
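
The same statistics can also be fetched programmatically. The sketch below assumes the 5.x low-level Java REST client and a node listening on `localhost:9200`; it is an illustration, not part of the documented API surface:

[source,java]
--------------------------------------------------
import java.util.Collections;

import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class FielddataStats {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // node-level fielddata statistics, restricted to the given fields
            Response response = client.performRequest("GET", "/_nodes/stats/indices/fielddata",
                    Collections.singletonMap("fields", "field1,field2"));
            System.out.println(EntityUtils.toString(response.getEntity()));
        }
    }
}
--------------------------------------------------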
Supported metrics are:
* `completion`
* `docs`
* `fielddata`
* `flush`
* `get`
* `indexing`
* `merge`
* `query_cache`
* `recovery`
* `refresh`
* `request_cache`
* `search`
* `segments`
* `store`
* `suggest`
* `translog`
* `warmer`
[float] [float]
[[search-groups]] [[search-groups]]
=== Search groups === Search groups

View File

@ -69,8 +69,7 @@ section for more information on mapping definitions.
Automatic index creation can be disabled by setting Automatic index creation can be disabled by setting
`action.auto_create_index` to `false` in the config file of all nodes. `action.auto_create_index` to `false` in the config file of all nodes.
Automatic mapping creation can be disabled by setting Automatic mapping creation can be disabled by setting
`index.mapper.dynamic` to `false` in the config files of all nodes (or `index.mapper.dynamic` to `false` per-index as an index setting.
on the specific index settings).
Automatic index creation can include a pattern based white/black list, Automatic index creation can include a pattern based white/black list,
for example, set `action.auto_create_index` to `+aaa*,-bbb*,+ccc*,-*` (+ for example, set `action.auto_create_index` to `+aaa*,-bbb*,+ccc*,-*` (+
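
The white/black list is easiest to read as an ordered, first-match-wins scan over the comma-separated patterns. The following is a simplified model of that matching, assuming `*` is the only wildcard and every entry carries a `+` or `-` prefix; the real logic lives in `AutoCreateIndex` and may differ in detail:

[source,java]
--------------------------------------------------
import java.util.regex.Pattern;

public class AutoCreatePatterns {

    /** True if the index name is allowed by a list such as "+aaa*,-bbb*,+ccc*,-*". */
    static boolean allowed(String patternList, String index) {
        for (String entry : patternList.split(",")) {
            boolean allow = entry.startsWith("+");       // entries are assumed to start with + or -
            String glob = entry.substring(1);
            // translate the glob to a regex: quote everything except '*'
            String regex = ("\\Q" + glob + "\\E").replace("*", "\\E.*\\Q");
            if (Pattern.matches(regex, index)) {
                return allow;                            // first matching entry wins
            }
        }
        return false;                                    // no entry matched
    }

    public static void main(String[] args) {
        String patterns = "+aaa*,-bbb*,+ccc*,-*";
        System.out.println(allowed(patterns, "aaa-logs")); // true
        System.out.println(allowed(patterns, "bbb-logs")); // false
        System.out.println(allowed(patterns, "other"));    // false
    }
}
--------------------------------------------------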

View File

@ -522,7 +522,7 @@ instead of deleting all documents with the Delete By Query API.
=== Batch Processing === Batch Processing
In addition to being able to index, update, and delete individual documents, Elasticsearch also provides the ability to perform any of the above operations in batches using the <<docs-bulk,`_bulk` API>>. This functionality is important in that it provides a very efficient mechanism to do multiple operations as fast as possible with as little network roundtrips as possible. In addition to being able to index, update, and delete individual documents, Elasticsearch also provides the ability to perform any of the above operations in batches using the <<docs-bulk,`_bulk` API>>. This functionality is important in that it provides a very efficient mechanism to do multiple operations as fast as possible with as few network roundtrips as possible.
As a quick example, the following call indexes two documents (ID 1 - John Doe and ID 2 - Jane Doe) in one bulk operation: As a quick example, the following call indexes two documents (ID 1 - John Doe and ID 2 - Jane Doe) in one bulk operation:

View File

@ -74,7 +74,7 @@ the <<indices-stats,indices stats>> API:
[source,sh] [source,sh]
-------------------------------------------------- --------------------------------------------------
GET twitter/_stats/commit?level=shards GET twitter/_stats?level=shards
-------------------------------------------------- --------------------------------------------------
// CONSOLE // CONSOLE
// TEST[s/^/PUT twitter\n/] // TEST[s/^/PUT twitter\n/]

View File

@ -32,21 +32,21 @@ PUT my_index
POST my_index/my_type/1 POST my_index/my_type/1
{ {
"is_published": true <1> "is_published": 1 <1>
} }
GET my_index/_search GET my_index/_search
{ {
"query": { "query": {
"term": { "term": {
"is_published": 1 <2> "is_published": true <2>
} }
} }
} }
-------------------------------------------------- --------------------------------------------------
// CONSOLE // CONSOLE
<1> Indexing a document with a JSON `true`. <1> Indexing a document with `1`, which is interpreted as `true`.
<2> Querying for the document with `1`, which is interpreted as `true`. <2> Searching for documents with a JSON `true`.
Aggregations like the <<search-aggregations-bucket-terms-aggregation,`terms` Aggregations like the <<search-aggregations-bucket-terms-aggregation,`terms`
aggregation>> use `1` and `0` for the `key`, and the strings `"true"` and aggregation>> use `1` and `0` for the `key`, and the strings `"true"` and

View File

@ -5,3 +5,7 @@
* The `collect_payloads` parameter of the `span_near` query has been removed. Payloads will be * The `collect_payloads` parameter of the `span_near` query has been removed. Payloads will be
loaded when needed. loaded when needed.
* Queries on boolean fields now strictly parse boolean-like values. This means
only the strings `"true"` and `"false"` will be parsed into their boolean
counterparts. Other strings will cause an error to be thrown.
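
A minimal model of the new strict behaviour, illustrative only; the real parsing happens in the boolean field mapper and the query parsers:

[source,java]
--------------------------------------------------
public class StrictBooleans {

    /** Accept only the strings "true" and "false"; anything else is an error. */
    static boolean parseStrict(String value) {
        switch (value) {
            case "true":
                return true;
            case "false":
                return false;
            default:
                throw new IllegalArgumentException(
                        "Can't parse boolean value [" + value + "], expected [true] or [false]");
        }
    }

    public static void main(String[] args) {
        System.out.println(parseStrict("true")); // true
        parseStrict("foo");                      // throws IllegalArgumentException
    }
}
--------------------------------------------------

The error message mirrors the one asserted in `QueryStringIT.testBooleanStrictQuery` above.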

View File

@ -109,7 +109,8 @@ which field to search on. It defaults to `_all` field.
If the `_all` field is disabled, the `query_string` query will automatically If the `_all` field is disabled, the `query_string` query will automatically
attempt to determine the existing fields in the index's mapping that are attempt to determine the existing fields in the index's mapping that are
queryable, and perform the search on those fields. queryable, and perform the search on those fields. Note that this will not
include nested documents; use a nested query to search those documents.
[float] [float]
==== Multi Field ==== Multi Field

View File

@ -179,4 +179,4 @@ logs to roll and compress after 1 GB, and to preserve a maximum of five log
files (four rolled logs, and the active log). files (four rolled logs, and the active log).
You can disable it in the `config/log4j2.properties` file by setting the deprecation You can disable it in the `config/log4j2.properties` file by setting the deprecation
log level to `info`. log level to `error`.

View File

@ -0,0 +1,5 @@
double profit = 0;
for (t in params._agg.transactions) {
profit += t
}
return profit

View File

@ -0,0 +1 @@
params._agg.transactions = []

View File

@ -0,0 +1 @@
params._agg.transactions.add(doc.type.value == 'sale' ? doc.amount.value : -1 * doc.amount.value)

View File

@ -0,0 +1,5 @@
double profit = 0;
for (a in params._aggs) {
profit += a
}
return profit

View File

@ -234,33 +234,13 @@ public class PercolatorFieldMapper extends FieldMapper {
KeywordFieldMapper queryTermsField, KeywordFieldMapper extractionResultField, KeywordFieldMapper queryTermsField, KeywordFieldMapper extractionResultField,
BinaryFieldMapper queryBuilderField) { BinaryFieldMapper queryBuilderField) {
super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo); super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo);
this.queryShardContext = new QueryShardContextSupplierCache(queryShardContext); this.queryShardContext = queryShardContext;
this.queryTermsField = queryTermsField; this.queryTermsField = queryTermsField;
this.extractionResultField = extractionResultField; this.extractionResultField = extractionResultField;
this.queryBuilderField = queryBuilderField; this.queryBuilderField = queryBuilderField;
this.mapUnmappedFieldAsString = INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING.get(indexSettings); this.mapUnmappedFieldAsString = INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING.get(indexSettings);
} }
private static class QueryShardContextSupplierCache implements Supplier<QueryShardContext> {
private final Supplier<QueryShardContext> supplier;
private volatile QueryShardContext context;
QueryShardContextSupplierCache(Supplier<QueryShardContext> supplier) {
this.supplier = supplier;
}
@Override
public QueryShardContext get() {
QueryShardContext context = this.context;
if (context == null) {
context = this.context = supplier.get();
}
// return a copy
return new QueryShardContext(context);
}
}
@Override @Override
public FieldMapper updateFieldType(Map<String, MappedFieldType> fullNameToFieldType) { public FieldMapper updateFieldType(Map<String, MappedFieldType> fullNameToFieldType) {
PercolatorFieldMapper updated = (PercolatorFieldMapper) super.updateFieldType(fullNameToFieldType); PercolatorFieldMapper updated = (PercolatorFieldMapper) super.updateFieldType(fullNameToFieldType);

View File

@ -39,7 +39,8 @@ import static org.hamcrest.Matchers.nullValue;
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE,
numDataNodes = 0, numDataNodes = 0,
transportClientRatio = 0.0, transportClientRatio = 0.0,
numClientNodes = 0) numClientNodes = 0,
autoMinMasterNodes = false)
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch-cloud-azure/issues/89") @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch-cloud-azure/issues/89")
public class AzureMinimumMasterNodesTests extends AbstractAzureComputeServiceTestCase { public class AzureMinimumMasterNodesTests extends AbstractAzureComputeServiceTestCase {

View File

@ -33,7 +33,7 @@ import static org.hamcrest.CoreMatchers.is;
* starting. * starting.
* This test requires AWS to run. * This test requires AWS to run.
*/ */
@ClusterScope(scope = Scope.TEST, numDataNodes = 0, numClientNodes = 0, transportClientRatio = 0.0) @ClusterScope(scope = Scope.TEST, numDataNodes = 0, numClientNodes = 0, transportClientRatio = 0.0, autoMinMasterNodes = false)
public class Ec2DiscoveryUpdateSettingsTests extends AbstractAwsTestCase { public class Ec2DiscoveryUpdateSettingsTests extends AbstractAwsTestCase {
public void testMinimumMasterNodesStart() { public void testMinimumMasterNodesStart() {
Settings nodeSettings = Settings.builder() Settings nodeSettings = Settings.builder()

View File

@ -88,7 +88,11 @@ setup() {
sudo chmod +x $JAVA sudo chmod +x $JAVA
[ "$status" -eq 1 ] [ "$status" -eq 1 ]
[[ "$output" == *"Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME"* ]] local expected="Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME"
[[ "$output" == *"$expected"* ]] || {
echo "Expected error message [$expected] but found: $output"
false
}
} }
################################## ##################################

View File

@ -176,7 +176,11 @@ fi
sudo chmod +x $JAVA sudo chmod +x $JAVA
[ "$status" -eq 1 ] [ "$status" -eq 1 ]
[[ "$output" == *"Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME"* ]] local expected="Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME"
[[ "$output" == *"$expected"* ]] || {
echo "Expected error message [$expected] but found: $output"
false
}
} }
# Note that all of the tests from here to the end of the file expect to be run # Note that all of the tests from here to the end of the file expect to be run

View File

@ -1 +1,2 @@
5.0.0 5.0.0
5.0.1

View File

@ -1,5 +1,8 @@
--- ---
"Shrink index via API": "Shrink index via API":
- skip:
features: always
reason: Fails consistently for Nik and sometimes for Jenkins. Skip until we can get it passing consistently.
# creates an index with one document solely allocated on the master node # creates an index with one document solely allocated on the master node
# and shrinks it into a new index with a single shard # and shrinks it into a new index with a single shard
# we don't do the relocation to a single node after the index is created # we don't do the relocation to a single node after the index is created
@ -9,7 +12,7 @@
- do: - do:
cluster.state: {} cluster.state: {}
# Get master node id # Get master node id
- set: { master_node: master } - set: { master_node: master }
- do: - do:

View File

@ -100,3 +100,16 @@ setup:
- is_false: indices.test1 - is_false: indices.test1
- is_true: indices.test2 - is_true: indices.test2
---
"Indices stats unrecognized parameter":
- skip:
version: " - 5.0.99"
reason: strict stats handling does not exist in 5.0
- do:
indices.stats:
metric: [ fieldata ]
ignore: 400
- match: { status: 400 }
- match: { error.type: illegal_argument_exception }
- match: { error.reason: "request [/_stats/fieldata] contains unrecognized metric: [fieldata] -> did you mean [fielddata]?" }

View File

@ -20,3 +20,17 @@
level: "indices" level: "indices"
- is_true: nodes.$master.indices.indices - is_true: nodes.$master.indices.indices
---
"Nodes stats unrecognized parameter":
- skip:
version: " - 5.0.99"
reason: strict stats handling does not exist in 5.0
- do:
nodes.stats:
metric: [ transprot ]
ignore: 400
- match: { status: 400 }
- match: { error.type: illegal_argument_exception }
- match: { error.reason: "request [/_nodes/stats/transprot] contains unrecognized metric: [transprot] -> did you mean [transport]?" }

View File

@ -149,6 +149,7 @@ import java.util.ArrayList;
import java.util.Arrays; import java.util.Arrays;
import java.util.Collection; import java.util.Collection;
import java.util.Collections; import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet; import java.util.HashSet;
import java.util.IdentityHashMap; import java.util.IdentityHashMap;
import java.util.List; import java.util.List;
@ -178,6 +179,7 @@ import static org.elasticsearch.test.XContentTestUtils.differenceBetweenMapsIgno
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.emptyArray; import static org.hamcrest.Matchers.emptyArray;
import static org.hamcrest.Matchers.emptyIterable; import static org.hamcrest.Matchers.emptyIterable;
import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.equalTo;
@ -406,6 +408,9 @@ public abstract class ESIntegTestCase extends ESTestCase {
if (randomBoolean()) { if (randomBoolean()) {
randomSettingsBuilder.put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), randomBoolean()); randomSettingsBuilder.put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), randomBoolean());
} }
if (randomBoolean()) {
randomSettingsBuilder.put(IndexModule.INDEX_QUERY_CACHE_TERM_QUERIES_SETTING.getKey(), randomBoolean());
}
PutIndexTemplateRequestBuilder putTemplate = client().admin().indices() PutIndexTemplateRequestBuilder putTemplate = client().admin().indices()
.preparePutTemplate("random_index_template") .preparePutTemplate("random_index_template")
.setPatterns(Collections.singletonList("*")) .setPatterns(Collections.singletonList("*"))
@ -527,10 +532,15 @@ public abstract class ESIntegTestCase extends ESTestCase {
if (cluster() != null) { if (cluster() != null) {
if (currentClusterScope != Scope.TEST) { if (currentClusterScope != Scope.TEST) {
MetaData metaData = client().admin().cluster().prepareState().execute().actionGet().getState().getMetaData(); MetaData metaData = client().admin().cluster().prepareState().execute().actionGet().getState().getMetaData();
assertThat("test leaves persistent cluster metadata behind: " + metaData.persistentSettings().getAsMap(), metaData final Map<String, String> persistent = metaData.persistentSettings().getAsMap();
.persistentSettings().getAsMap().size(), equalTo(0)); assertThat("test leaves persistent cluster metadata behind: " + persistent, persistent.size(), equalTo(0));
assertThat("test leaves transient cluster metadata behind: " + metaData.transientSettings().getAsMap(), metaData final Map<String, String> transientSettings = new HashMap<>(metaData.transientSettings().getAsMap());
.transientSettings().getAsMap().size(), equalTo(0)); if (isInternalCluster() && internalCluster().getAutoManageMinMasterNode()) {
// this is set by the test infra
transientSettings.remove(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey());
}
assertThat("test leaves transient cluster metadata behind: " + transientSettings,
transientSettings.keySet(), empty());
} }
ensureClusterSizeConsistency(); ensureClusterSizeConsistency();
ensureClusterStateConsistency(); ensureClusterStateConsistency();
@ -1518,6 +1528,12 @@ public abstract class ESIntegTestCase extends ESTestCase {
*/ */
boolean supportsDedicatedMasters() default true; boolean supportsDedicatedMasters() default true;
/**
* The cluster automatically manages the {@link ElectMasterService#DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING} by default
* as nodes are started and stopped. Set this to false to manage the setting manually.
*/
boolean autoMinMasterNodes() default true;
/** /**
* Returns the number of client nodes in the cluster. Default is {@link InternalTestCluster#DEFAULT_NUM_CLIENT_NODES}, a * Returns the number of client nodes in the cluster. Default is {@link InternalTestCluster#DEFAULT_NUM_CLIENT_NODES}, a
* negative value means that the number of client nodes will be randomized. * negative value means that the number of client nodes will be randomized.
@ -1615,6 +1631,11 @@ public abstract class ESIntegTestCase extends ESTestCase {
return annotation == null ? true : annotation.supportsDedicatedMasters(); return annotation == null ? true : annotation.supportsDedicatedMasters();
} }
private boolean getAutoMinMasterNodes() {
ClusterScope annotation = getAnnotation(this.getClass(), ClusterScope.class);
return annotation == null ? true : annotation.autoMinMasterNodes();
}
private int getNumDataNodes() { private int getNumDataNodes() {
ClusterScope annotation = getAnnotation(this.getClass(), ClusterScope.class); ClusterScope annotation = getAnnotation(this.getClass(), ClusterScope.class);
return annotation == null ? -1 : annotation.numDataNodes(); return annotation == null ? -1 : annotation.numDataNodes();
@ -1753,7 +1774,8 @@ public abstract class ESIntegTestCase extends ESTestCase {
} }
mockPlugins = mocks; mockPlugins = mocks;
} }
return new InternalTestCluster(seed, createTempDir(), supportsDedicatedMasters, minNumDataNodes, maxNumDataNodes, return new InternalTestCluster(seed, createTempDir(), supportsDedicatedMasters, getAutoMinMasterNodes(),
minNumDataNodes, maxNumDataNodes,
InternalTestCluster.clusterName(scope.name(), seed) + "-cluster", nodeConfigurationSource, getNumClientNodes(), InternalTestCluster.clusterName(scope.name(), seed) + "-cluster", nodeConfigurationSource, getNumClientNodes(),
InternalTestCluster.DEFAULT_ENABLE_HTTP_PIPELINING, nodePrefix, mockPlugins, getClientWrapper()); InternalTestCluster.DEFAULT_ENABLE_HTTP_PIPELINING, nodePrefix, mockPlugins, getClientWrapper());
} }
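
The auto-managed value follows the usual quorum rule, a strict majority of master-eligible nodes, which is exactly what the `InternalTestCluster` changes below compute as `(numberOfMasterNodes / 2) + 1`. A one-method sketch:

[source,java]
--------------------------------------------------
public class MinMasterNodes {

    /** A strict majority of master-eligible nodes, the quorum that avoids split brain. */
    static int minMasterNodes(int masterEligibleNodes) {
        return (masterEligibleNodes / 2) + 1;
    }

    public static void main(String[] args) {
        // 1 -> 1, 2 -> 2, 3 -> 2, 4 -> 3, 5 -> 3
        for (int n = 1; n <= 5; n++) {
            System.out.println(n + " master-eligible nodes -> min_master_nodes " + minMasterNodes(n));
        }
    }
}
--------------------------------------------------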

View File

@ -444,7 +444,6 @@ public abstract class ESTestCase extends LuceneTestCase {
return RandomPicks.randomFrom(random, array); return RandomPicks.randomFrom(random, array);
} }
/** Pick a random object from the given list. */ /** Pick a random object from the given list. */
public static <T> T randomFrom(List<T> list) { public static <T> T randomFrom(List<T> list) {
return RandomPicks.randomFrom(random(), list); return RandomPicks.randomFrom(random(), list);
@ -452,7 +451,12 @@ public abstract class ESTestCase extends LuceneTestCase {
/** Pick a random object from the given collection. */ /** Pick a random object from the given collection. */
public static <T> T randomFrom(Collection<T> collection) { public static <T> T randomFrom(Collection<T> collection) {
return RandomPicks.randomFrom(random(), collection); return randomFrom(random(), collection);
}
/** Pick a random object from the given collection. */
public static <T> T randomFrom(Random random, Collection<T> collection) {
return RandomPicks.randomFrom(random, collection);
} }
public static String randomAsciiOfLengthBetween(int minCodeUnits, int maxCodeUnits) { public static String randomAsciiOfLengthBetween(int minCodeUnits, int maxCodeUnits) {

View File

@ -24,12 +24,10 @@ import com.carrotsearch.randomizedtesting.SysGlobals;
import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.carrotsearch.randomizedtesting.generators.RandomNumbers;
import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import com.carrotsearch.randomizedtesting.generators.RandomStrings; import com.carrotsearch.randomizedtesting.generators.RandomStrings;
import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.Logger;
import org.apache.lucene.store.StoreRateLimiting; import org.apache.lucene.store.StoreRateLimiting;
import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
@ -68,7 +66,8 @@ import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.ElectMasterService;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.env.Environment; import org.elasticsearch.env.Environment;
import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.env.ShardLockObtainFailedException; import org.elasticsearch.env.ShardLockObtainFailedException;
@ -133,8 +132,10 @@ import java.util.stream.Stream;
import static org.apache.lucene.util.LuceneTestCase.TEST_NIGHTLY; import static org.apache.lucene.util.LuceneTestCase.TEST_NIGHTLY;
import static org.apache.lucene.util.LuceneTestCase.rarely; import static org.apache.lucene.util.LuceneTestCase.rarely;
import static org.elasticsearch.discovery.DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING;
import static org.elasticsearch.test.ESTestCase.assertBusy; import static org.elasticsearch.test.ESTestCase.assertBusy;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; import static org.elasticsearch.test.ESTestCase.randomFrom;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo;
@ -225,6 +226,8 @@ public final class InternalTestCluster extends TestCluster {
private final ExecutorService executor; private final ExecutorService executor;
private final boolean autoManageMinMasterNodes;
private final Collection<Class<? extends Plugin>> mockPlugins; private final Collection<Class<? extends Plugin>> mockPlugins;
/** /**
@ -238,9 +241,10 @@ public final class InternalTestCluster extends TestCluster {
public InternalTestCluster(long clusterSeed, Path baseDir, public InternalTestCluster(long clusterSeed, Path baseDir,
boolean randomlyAddDedicatedMasters, boolean randomlyAddDedicatedMasters,
int minNumDataNodes, int maxNumDataNodes, String clusterName, NodeConfigurationSource nodeConfigurationSource, int numClientNodes, boolean autoManageMinMasterNodes, int minNumDataNodes, int maxNumDataNodes, String clusterName, NodeConfigurationSource nodeConfigurationSource, int numClientNodes,
boolean enableHttpPipelining, String nodePrefix, Collection<Class<? extends Plugin>> mockPlugins, Function<Client, Client> clientWrapper) { boolean enableHttpPipelining, String nodePrefix, Collection<Class<? extends Plugin>> mockPlugins, Function<Client, Client> clientWrapper) {
super(clusterSeed); super(clusterSeed);
this.autoManageMinMasterNodes = autoManageMinMasterNodes;
this.clientWrapper = clientWrapper; this.clientWrapper = clientWrapper;
this.baseDir = baseDir; this.baseDir = baseDir;
this.clusterName = clusterName; this.clusterName = clusterName;
@ -345,6 +349,11 @@ public final class InternalTestCluster extends TestCluster {
return clusterName; return clusterName;
} }
/** returns true if the {@link ElectMasterService#DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING} setting is auto managed by this cluster */
public boolean getAutoManageMinMasterNode() {
return autoManageMinMasterNodes;
}
public String[] getNodeNames() { public String[] getNodeNames() {
return nodes.keySet().toArray(Strings.EMPTY_ARRAY); return nodes.keySet().toArray(Strings.EMPTY_ARRAY);
} }
@ -466,7 +475,7 @@ public final class InternalTestCluster extends TestCluster {
if (randomNodeAndClient != null) { if (randomNodeAndClient != null) {
return randomNodeAndClient; return randomNodeAndClient;
} }
NodeAndClient buildNode = buildNode(); NodeAndClient buildNode = buildNode(1);
buildNode.startNode(); buildNode.startNode();
publishNode(buildNode); publishNode(buildNode);
return buildNode; return buildNode;
@ -496,30 +505,20 @@ public final class InternalTestCluster extends TestCluster {
* if more nodes than <code>n</code> are present this method will not * if more nodes than <code>n</code> are present this method will not
* stop any of the running nodes. * stop any of the running nodes.
*/ */
public void ensureAtLeastNumDataNodes(int n) { public synchronized void ensureAtLeastNumDataNodes(int n) {
final List<Async<String>> asyncs = new ArrayList<>(); boolean added = false;
synchronized (this) { int size = numDataNodes();
int size = numDataNodes(); for (int i = size; i < n; i++) {
for (int i = size; i < n; i++) { logger.info("increasing cluster size from {} to {}", size, n);
logger.info("increasing cluster size from {} to {}", size, n); added = true;
if (numSharedDedicatedMasterNodes > 0) { if (numSharedDedicatedMasterNodes > 0) {
asyncs.add(startDataOnlyNodeAsync()); startDataOnlyNode(Settings.EMPTY);
} else { } else {
asyncs.add(startNodeAsync()); startNode(Settings.EMPTY);
}
} }
} }
try { if (added) {
for (Async<String> async : asyncs) { validateClusterFormed();
async.get();
}
} catch (Exception e) {
throw new ElasticsearchException("failed to start nodes", e);
}
if (!asyncs.isEmpty()) {
synchronized (this) {
assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(nodes.size())).get());
}
} }
} }
@ -544,28 +543,47 @@ public final class InternalTestCluster extends TestCluster {
while (values.hasNext() && numNodesAndClients++ < size - n) { while (values.hasNext() && numNodesAndClients++ < size - n) {
NodeAndClient next = values.next(); NodeAndClient next = values.next();
nodesToRemove.add(next); nodesToRemove.add(next);
removeDisruptionSchemeFromNode(next);
next.close();
}
for (NodeAndClient toRemove : nodesToRemove) {
nodes.remove(toRemove.name);
} }
stopNodesAndClients(nodesToRemove);
if (!nodesToRemove.isEmpty() && size() > 0) { if (!nodesToRemove.isEmpty() && size() > 0) {
assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(nodes.size())).get()); validateClusterFormed();
} }
} }
private NodeAndClient buildNode(Settings settings) { /**
* builds a new node given the settings.
*
* @param settings the settings to use
* @param defaultMinMasterNodes min_master_nodes value to use if min_master_nodes is auto managed
*/
private NodeAndClient buildNode(Settings settings, int defaultMinMasterNodes) {
int ord = nextNodeId.getAndIncrement(); int ord = nextNodeId.getAndIncrement();
return buildNode(ord, random.nextLong(), settings, false); return buildNode(ord, random.nextLong(), settings, false, defaultMinMasterNodes);
} }
private NodeAndClient buildNode() { /**
* builds a new node with default settings
*
* @param defaultMinMasterNodes min_master_nodes value to use if min_master_nodes is auto managed
*/
private NodeAndClient buildNode(int defaultMinMasterNodes) {
int ord = nextNodeId.getAndIncrement(); int ord = nextNodeId.getAndIncrement();
return buildNode(ord, random.nextLong(), null, false); return buildNode(ord, random.nextLong(), null, false, defaultMinMasterNodes);
} }
private NodeAndClient buildNode(int nodeId, long seed, Settings settings, boolean reuseExisting) { /**
* builds a new node
*
* @param nodeId the node internal id (see {@link NodeAndClient#nodeAndClientId()})
* @param seed the node's random seed
* @param settings the settings to use
* @param reuseExisting if a node with the same name is already part of {@link #nodes}, no new node will be built and
* the method will return the existing one
* @param defaultMinMasterNodes min_master_nodes value to use if min_master_nodes is auto managed
*/
private NodeAndClient buildNode(int nodeId, long seed, Settings settings,
boolean reuseExisting, int defaultMinMasterNodes) {
assert Thread.holdsLock(this); assert Thread.holdsLock(this);
ensureOpen(); ensureOpen();
settings = getSettings(nodeId, seed, settings); settings = getSettings(nodeId, seed, settings);
@ -577,13 +595,21 @@ public final class InternalTestCluster extends TestCluster {
assert reuseExisting == true || nodes.containsKey(name) == false : assert reuseExisting == true || nodes.containsKey(name) == false :
"node name [" + name + "] already exists but not allowed to use it"; "node name [" + name + "] already exists but not allowed to use it";
} }
Settings finalSettings = Settings.builder() Settings.Builder finalSettings = Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), baseDir) // allow overriding path.home .put(Environment.PATH_HOME_SETTING.getKey(), baseDir) // allow overriding path.home
.put(settings) .put(settings)
.put("node.name", name) .put("node.name", name)
.put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), seed) .put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), seed);
.build();
MockNode node = new MockNode(finalSettings, plugins); if (autoManageMinMasterNodes) {
assert finalSettings.get(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey()) == null :
"min master nodes may not be set when auto managed";
finalSettings
// don't wait too long not to slow down tests
.put(ZenDiscovery.MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING.getKey(), "5s")
.put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), defaultMinMasterNodes);
}
MockNode node = new MockNode(finalSettings.build(), plugins);
return new NodeAndClient(name, node, nodeId); return new NodeAndClient(name, node, nodeId);
} }
@ -684,7 +710,7 @@ public final class InternalTestCluster extends TestCluster {
.put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_INGEST_SETTING.getKey(), false); .put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_INGEST_SETTING.getKey(), false);
if (size() == 0) { if (size() == 0) {
// if we are the first node - don't wait for a state // if we are the first node - don't wait for a state
builder.put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), 0); builder.put(INITIAL_STATE_TIMEOUT_SETTING.getKey(), 0);
} }
return startNode(builder); return startNode(builder);
} }
@ -777,6 +803,10 @@ public final class InternalTestCluster extends TestCluster {
return nodeAndClientId; return nodeAndClientId;
} }
public boolean isMasterEligible() {
return Node.NODE_MASTER_SETTING.get(node.settings());
}
Client client(Random random) { Client client(Random random) {
if (closed.get()) { if (closed.get()) {
throw new RuntimeException("already closed"); throw new RuntimeException("already closed");
@ -844,21 +874,40 @@ public final class InternalTestCluster extends TestCluster {
node.close(); node.close();
} }
void restart(RestartCallback callback, boolean clearDataIfNeeded) throws Exception { /**
assert callback != null; * closes the current node if not already closed, builds a new node object using the current node settings and starts it
resetClient(); */
void restart(RestartCallback callback, boolean clearDataIfNeeded, int minMasterNodes) throws Exception {
if (!node.isClosed()) { if (!node.isClosed()) {
closeNode(); closeNode();
} }
Settings newSettings = callback.onNodeStopped(name); recreateNodeOnRestart(callback, clearDataIfNeeded, minMasterNodes);
if (newSettings == null) { startNode();
newSettings = Settings.EMPTY; }
/**
* rebuilds a new node object using the current node settings and starts it
*/
void recreateNodeOnRestart(RestartCallback callback, boolean clearDataIfNeeded, int minMasterNodes) throws Exception {
assert callback != null;
Settings callbackSettings = callback.onNodeStopped(name);
Settings.Builder newSettings = Settings.builder();
if (callbackSettings != null) {
newSettings.put(callbackSettings);
} }
if (minMasterNodes >= 0) {
assert ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.exists(newSettings.build()) == false : "min master nodes is auto managed";
newSettings.put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), minMasterNodes).build();
}
// validation is (optionally) done in fullRestart/rollingRestart
newSettings.put(INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s");
if (clearDataIfNeeded) { if (clearDataIfNeeded) {
clearDataIfNeeded(callback); clearDataIfNeeded(callback);
} }
createNewNode(newSettings); createNewNode(newSettings.build());
startNode(); // make sure cached client points to new node
resetClient();
} }
private void clearDataIfNeeded(RestartCallback callback) throws IOException { private void clearDataIfNeeded(RestartCallback callback) throws IOException {
@ -948,22 +997,24 @@ public final class InternalTestCluster extends TestCluster {
if (wipeData) { if (wipeData) {
wipePendingDataDirectories(); wipePendingDataDirectories();
} }
if (nodes.size() > 0 && autoManageMinMasterNodes) {
updateMinMasterNodes(getMasterNodesCount());
}
logger.debug("Cluster hasn't changed - moving out - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), newSize); logger.debug("Cluster hasn't changed - moving out - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), newSize);
return; return;
} }
logger.debug("Cluster is NOT consistent - restarting shared nodes - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), newSize); logger.debug("Cluster is NOT consistent - restarting shared nodes - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), newSize);
// trash all nodes with id >= sharedNodesSeeds.length - they are non shared // trash all nodes with id >= sharedNodesSeeds.length - they are non shared
final List<NodeAndClient> toClose = new ArrayList<>();
for (Iterator<NodeAndClient> iterator = nodes.values().iterator(); iterator.hasNext();) { for (Iterator<NodeAndClient> iterator = nodes.values().iterator(); iterator.hasNext();) {
NodeAndClient nodeAndClient = iterator.next(); NodeAndClient nodeAndClient = iterator.next();
if (nodeAndClient.nodeAndClientId() >= sharedNodesSeeds.length) { if (nodeAndClient.nodeAndClientId() >= sharedNodesSeeds.length) {
logger.debug("Close Node [{}] not shared", nodeAndClient.name); logger.debug("Close Node [{}] not shared", nodeAndClient.name);
nodeAndClient.close(); toClose.add(nodeAndClient);
iterator.remove();
} }
} }
stopNodesAndClients(toClose);
// clean up what the nodes left that is unused // clean up what the nodes left that is unused
if (wipeData) { if (wipeData) {
@@ -972,13 +1023,19 @@ public final class InternalTestCluster extends TestCluster {
         // start any missing node
         assert newSize == numSharedDedicatedMasterNodes + numSharedDataNodes + numSharedCoordOnlyNodes;
+        final int numberOfMasterNodes = numSharedDedicatedMasterNodes > 0 ? numSharedDedicatedMasterNodes : numSharedDataNodes;
+        final int defaultMinMasterNodes = (numberOfMasterNodes / 2) + 1;
+        final List<NodeAndClient> toStartAndPublish = new ArrayList<>(); // we want to start nodes in one go due to min master nodes
         for (int i = 0; i < numSharedDedicatedMasterNodes; i++) {
             final Settings.Builder settings = Settings.builder();
-            settings.put(Node.NODE_MASTER_SETTING.getKey(), true).build();
-            settings.put(Node.NODE_DATA_SETTING.getKey(), false).build();
-            NodeAndClient nodeAndClient = buildNode(i, sharedNodesSeeds[i], settings.build(), true);
-            nodeAndClient.startNode();
-            publishNode(nodeAndClient);
+            settings.put(Node.NODE_MASTER_SETTING.getKey(), true);
+            settings.put(Node.NODE_DATA_SETTING.getKey(), false);
+            if (autoManageMinMasterNodes) {
+                settings.put(INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s"); // we wait at the end
+            }
+            NodeAndClient nodeAndClient = buildNode(i, sharedNodesSeeds[i], settings.build(), true, defaultMinMasterNodes);
+            toStartAndPublish.add(nodeAndClient);
         }
         for (int i = numSharedDedicatedMasterNodes; i < numSharedDedicatedMasterNodes + numSharedDataNodes; i++) {
             final Settings.Builder settings = Settings.builder();
@@ -987,32 +1044,48 @@ public final class InternalTestCluster extends TestCluster {
                 settings.put(Node.NODE_MASTER_SETTING.getKey(), false).build();
                 settings.put(Node.NODE_DATA_SETTING.getKey(), true).build();
             }
-            NodeAndClient nodeAndClient = buildNode(i, sharedNodesSeeds[i], settings.build(), true);
-            nodeAndClient.startNode();
-            publishNode(nodeAndClient);
+            if (autoManageMinMasterNodes) {
+                settings.put(INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s"); // we wait at the end
+            }
+            NodeAndClient nodeAndClient = buildNode(i, sharedNodesSeeds[i], settings.build(), true, defaultMinMasterNodes);
+            toStartAndPublish.add(nodeAndClient);
         }
         for (int i = numSharedDedicatedMasterNodes + numSharedDataNodes;
              i < numSharedDedicatedMasterNodes + numSharedDataNodes + numSharedCoordOnlyNodes; i++) {
             final Builder settings = Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false)
                 .put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_INGEST_SETTING.getKey(), false);
-            NodeAndClient nodeAndClient = buildNode(i, sharedNodesSeeds[i], settings.build(), true);
-            nodeAndClient.startNode();
-            publishNode(nodeAndClient);
+            NodeAndClient nodeAndClient = buildNode(i, sharedNodesSeeds[i], settings.build(), true, defaultMinMasterNodes);
+            toStartAndPublish.add(nodeAndClient);
         }
+        startAndPublishNodesAndClients(toStartAndPublish);
         nextNodeId.set(newSize);
         assert size() == newSize;
         if (newSize > 0) {
-            ClusterHealthResponse response = client().admin().cluster().prepareHealth()
-                .setWaitForNodes(Integer.toString(newSize)).get();
-            if (response.isTimedOut()) {
-                logger.warn("failed to wait for a cluster of size [{}], got [{}]", newSize, response);
-                throw new IllegalStateException("cluster failed to reach the expected size of [" + newSize + "]");
-            }
+            validateClusterFormed();
         }
         logger.debug("Cluster is consistent again - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), newSize);
     }
+    /** ensure a cluster is formed with {@link #nodes}.size() nodes. */
+    private void validateClusterFormed() {
+        String name = randomFrom(random, getNodeNames());
+        validateClusterFormed(name);
+    }
+
+    /** ensure a cluster is formed with {@link #nodes}.size() nodes, but do so by using the client of the specified node */
+    private void validateClusterFormed(String viaNode) {
+        final int size = nodes.size();
+        logger.trace("validating cluster formed via [{}], expecting [{}]", viaNode, size);
+        final Client client = client(viaNode);
+        ClusterHealthResponse response = client.admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(size)).get();
+        if (response.isTimedOut()) {
+            logger.warn("failed to wait for a cluster of size [{}], got [{}]", size, response);
+            throw new IllegalStateException("cluster failed to reach the expected size of [" + size + "]");
+        }
+    }
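validateClusterFormed wraps the standard wait-for-nodes health check, and the same idiom works inline in a test. A minimal sketch, with an arbitrary node count of 3:

    ClusterHealthResponse health = client().admin().cluster().prepareHealth()
        .setWaitForNodes(Integer.toString(3)) // blocks until 3 nodes have joined, or the request times out
        .get();
    assertFalse("cluster failed to form", health.isTimedOut());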
     @Override
     public synchronized void afterTest() throws IOException {
         wipePendingDataDirectories();
@@ -1234,9 +1307,7 @@ public final class InternalTestCluster extends TestCluster {
         NodeAndClient nodeAndClient = getRandomNodeAndClient(new DataNodePredicate());
         if (nodeAndClient != null) {
             logger.info("Closing random node [{}] ", nodeAndClient.name);
-            removeDisruptionSchemeFromNode(nodeAndClient);
-            nodes.remove(nodeAndClient.name);
-            nodeAndClient.close();
+            stopNodesAndClient(nodeAndClient);
             return true;
         }
         return false;
@@ -1251,9 +1322,7 @@ public final class InternalTestCluster extends TestCluster {
         NodeAndClient nodeAndClient = getRandomNodeAndClient(nc -> filter.test(nc.node.settings()));
         if (nodeAndClient != null) {
             logger.info("Closing filtered random node [{}] ", nodeAndClient.name);
-            removeDisruptionSchemeFromNode(nodeAndClient);
-            nodes.remove(nodeAndClient.name);
-            nodeAndClient.close();
+            stopNodesAndClient(nodeAndClient);
         }
     }
@@ -1266,9 +1335,7 @@ public final class InternalTestCluster extends TestCluster {
         String masterNodeName = getMasterName();
         assert nodes.containsKey(masterNodeName);
         logger.info("Closing master node [{}] ", masterNodeName);
-        removeDisruptionSchemeFromNode(nodes.get(masterNodeName));
-        NodeAndClient remove = nodes.remove(masterNodeName);
-        remove.close();
+        stopNodesAndClient(nodes.get(masterNodeName));
     }
     /**
@@ -1278,8 +1345,47 @@ public final class InternalTestCluster extends TestCluster {
         NodeAndClient nodeAndClient = getRandomNodeAndClient(new MasterNodePredicate(getMasterName()).negate());
         if (nodeAndClient != null) {
             logger.info("Closing random non master node [{}] current master [{}] ", nodeAndClient.name, getMasterName());
+            stopNodesAndClient(nodeAndClient);
+        }
+    }
+
+    private synchronized void startAndPublishNodesAndClients(List<NodeAndClient> nodeAndClients) {
+        if (nodeAndClients.size() > 0) {
+            final int newMasters = (int) nodeAndClients.stream().filter(NodeAndClient::isMasterEligible)
+                .filter(nac -> nodes.containsKey(nac.name) == false) // filter out old masters
+                .count();
+            final int currentMasters = getMasterNodesCount();
+            if (autoManageMinMasterNodes && currentMasters > 1 && newMasters > 0) {
+                // special case for 1 node master - we can't update the min master nodes before we add more nodes.
+                updateMinMasterNodes(currentMasters + newMasters);
+            }
+            for (NodeAndClient nodeAndClient : nodeAndClients) {
+                nodeAndClient.startNode();
+                publishNode(nodeAndClient);
+            }
+            if (autoManageMinMasterNodes && currentMasters == 1 && newMasters > 0) {
+                // update once masters have joined
+                validateClusterFormed();
+                updateMinMasterNodes(currentMasters + newMasters);
+            }
+        }
+    }
+
+    private synchronized void stopNodesAndClient(NodeAndClient nodeAndClient) throws IOException {
+        stopNodesAndClients(Collections.singleton(nodeAndClient));
+    }
+
+    private synchronized void stopNodesAndClients(Collection<NodeAndClient> nodeAndClients) throws IOException {
+        if (autoManageMinMasterNodes && nodeAndClients.size() > 0) {
+            int masters = (int) nodeAndClients.stream().filter(NodeAndClient::isMasterEligible).count();
+            if (masters > 0) {
+                updateMinMasterNodes(getMasterNodesCount() - masters);
+            }
+        }
+        for (NodeAndClient nodeAndClient : nodeAndClients) {
             removeDisruptionSchemeFromNode(nodeAndClient);
-            nodes.remove(nodeAndClient.name);
+            NodeAndClient previous = nodes.remove(nodeAndClient.name);
+            assert previous == nodeAndClient;
             nodeAndClient.close();
         }
     }
@@ -1319,8 +1425,7 @@ public final class InternalTestCluster extends TestCluster {
         ensureOpen();
         NodeAndClient nodeAndClient = getRandomNodeAndClient(predicate);
         if (nodeAndClient != null) {
-            logger.info("Restarting random node [{}] ", nodeAndClient.name);
-            nodeAndClient.restart(callback, true);
+            restartNode(nodeAndClient, callback);
         }
     }
@@ -1331,93 +1436,10 @@ public final class InternalTestCluster extends TestCluster {
         ensureOpen();
         NodeAndClient nodeAndClient = nodes.get(nodeName);
         if (nodeAndClient != null) {
-            logger.info("Restarting node [{}] ", nodeAndClient.name);
-            nodeAndClient.restart(callback, true);
+            restartNode(nodeAndClient, callback);
         }
     }
-
-    private synchronized void restartAllNodes(boolean rollingRestart, RestartCallback callback) throws Exception {
-        ensureOpen();
-        List<NodeAndClient> toRemove = new ArrayList<>();
-        try {
-            for (NodeAndClient nodeAndClient : nodes.values()) {
-                if (!callback.doRestart(nodeAndClient.name)) {
-                    logger.info("Closing node [{}] during restart", nodeAndClient.name);
-                    toRemove.add(nodeAndClient);
-                    if (activeDisruptionScheme != null) {
-                        activeDisruptionScheme.removeFromNode(nodeAndClient.name, this);
-                    }
-                    nodeAndClient.close();
-                }
-            }
-        } finally {
-            for (NodeAndClient nodeAndClient : toRemove) {
-                nodes.remove(nodeAndClient.name);
-            }
-        }
-        logger.info("Restarting remaining nodes rollingRestart [{}]", rollingRestart);
-        if (rollingRestart) {
-            int numNodesRestarted = 0;
-            for (NodeAndClient nodeAndClient : nodes.values()) {
-                callback.doAfterNodes(numNodesRestarted++, nodeAndClient.nodeClient());
-                logger.info("Restarting node [{}] ", nodeAndClient.name);
-                if (activeDisruptionScheme != null) {
-                    activeDisruptionScheme.removeFromNode(nodeAndClient.name, this);
-                }
-                nodeAndClient.restart(callback, true);
-                if (activeDisruptionScheme != null) {
-                    activeDisruptionScheme.applyToNode(nodeAndClient.name, this);
-                }
-            }
-        } else {
-            int numNodesRestarted = 0;
-            Set[] nodesRoleOrder = new Set[nextNodeId.get()];
-            Map<Set<Role>, List<NodeAndClient>> nodesByRoles = new HashMap<>();
-            for (NodeAndClient nodeAndClient : nodes.values()) {
-                callback.doAfterNodes(numNodesRestarted++, nodeAndClient.nodeClient());
-                logger.info("Stopping node [{}] ", nodeAndClient.name);
-                if (activeDisruptionScheme != null) {
-                    activeDisruptionScheme.removeFromNode(nodeAndClient.name, this);
-                }
-                nodeAndClient.closeNode();
-                // delete data folders now, before we start other nodes that may claim it
-                nodeAndClient.clearDataIfNeeded(callback);
-                DiscoveryNode discoveryNode = getInstanceFromNode(ClusterService.class, nodeAndClient.node()).localNode();
-                nodesRoleOrder[nodeAndClient.nodeAndClientId()] = discoveryNode.getRoles();
-                nodesByRoles.computeIfAbsent(discoveryNode.getRoles(), k -> new ArrayList<>()).add(nodeAndClient);
-            }
-            assert nodesByRoles.values().stream().collect(Collectors.summingInt(List::size)) == nodes.size();
-            // randomize start up order, but making sure that:
-            // 1) A data folder that was assigned to a data node will stay so
-            // 2) Data nodes will get the same node lock ordinal range, so custom index paths (where the ordinal is used)
-            //    will still belong to data nodes
-            for (List<NodeAndClient> sameRoleNodes : nodesByRoles.values()) {
-                Collections.shuffle(sameRoleNodes, random);
-            }
-            for (Set roles : nodesRoleOrder) {
-                if (roles == null) {
-                    // if some nodes were stopped, we want have a role for them
-                    continue;
-                }
-                NodeAndClient nodeAndClient = nodesByRoles.get(roles).remove(0);
-                logger.info("Starting node [{}] ", nodeAndClient.name);
-                if (activeDisruptionScheme != null) {
-                    activeDisruptionScheme.removeFromNode(nodeAndClient.name, this);
-                }
-                // we already cleared data folders, before starting nodes up
-                nodeAndClient.restart(callback, false);
-                if (activeDisruptionScheme != null) {
-                    activeDisruptionScheme.applyToNode(nodeAndClient.name, this);
-                }
-            }
-        }
-    }
     public static final RestartCallback EMPTY_CALLBACK = new RestartCallback() {
         @Override
         public Settings onNodeStopped(String node) {
@@ -1442,15 +1464,100 @@ public final class InternalTestCluster extends TestCluster {
     /**
      * Restarts all nodes in a rolling restart fashion, i.e. only restarts one node at a time.
      */
-    public void rollingRestart(RestartCallback function) throws Exception {
-        restartAllNodes(true, function);
+    public synchronized void rollingRestart(RestartCallback callback) throws Exception {
+        int numNodesRestarted = 0;
+        for (NodeAndClient nodeAndClient : nodes.values()) {
+            callback.doAfterNodes(numNodesRestarted++, nodeAndClient.nodeClient());
+            restartNode(nodeAndClient, callback);
+        }
+    }
+
+    private void restartNode(NodeAndClient nodeAndClient, RestartCallback callback) throws Exception {
+        logger.info("Restarting node [{}] ", nodeAndClient.name);
+        if (activeDisruptionScheme != null) {
+            activeDisruptionScheme.removeFromNode(nodeAndClient.name, this);
+        }
+        final int masterNodesCount = getMasterNodesCount();
+        // special case to allow stopping one node in a two node cluster and keep it functional
+        final boolean updateMinMaster = nodeAndClient.isMasterEligible() && masterNodesCount == 2 && autoManageMinMasterNodes;
+        if (updateMinMaster) {
+            updateMinMasterNodes(masterNodesCount - 1);
+        }
+        nodeAndClient.restart(callback, true, autoManageMinMasterNodes ? getMinMasterNodes(masterNodesCount) : -1);
+        if (activeDisruptionScheme != null) {
+            activeDisruptionScheme.applyToNode(nodeAndClient.name, this);
+        }
+        if (callback.validateClusterForming() || updateMinMaster) {
+            // we have to validate cluster size if updateMinMaster == true, because we need the
+            // second node to join in order to increment min_master_nodes back to 2.
+            // we also have to do this via the node that was just restarted, as the master may not
+            // yet have processed the fact that it left
+            validateClusterFormed(nodeAndClient.name);
+        }
+        if (updateMinMaster) {
+            updateMinMasterNodes(masterNodesCount);
+        }
     }
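The two-node special case above is what makes restarting one node of a two-master cluster workable: min_master_nodes drops to 1 while the node is down and is raised back to 2 once it rejoins. A sketch of a test exercising exactly that window (assuming an ESIntegTestCase-style internalCluster() with two master-eligible nodes):

    internalCluster().rollingRestart(new InternalTestCluster.RestartCallback() {
        @Override
        public Settings onNodeStopped(String nodeName) throws Exception {
            // exactly one of the two masters is down here; the survivor stays
            // functional because min_master_nodes was lowered to 1 first
            return Settings.EMPTY;
        }
    });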
     /**
      * Restarts all nodes in the cluster. It first stops all nodes and then restarts all the nodes again.
      */
-    public void fullRestart(RestartCallback function) throws Exception {
-        restartAllNodes(false, function);
+    public synchronized void fullRestart(RestartCallback callback) throws Exception {
+        int numNodesRestarted = 0;
+        Map<Set<Role>, List<NodeAndClient>> nodesByRoles = new HashMap<>();
+        Set[] rolesOrderedByOriginalStartupOrder = new Set[nextNodeId.get()];
+        for (NodeAndClient nodeAndClient : nodes.values()) {
+            callback.doAfterNodes(numNodesRestarted++, nodeAndClient.nodeClient());
+            logger.info("Stopping node [{}] ", nodeAndClient.name);
+            if (activeDisruptionScheme != null) {
+                activeDisruptionScheme.removeFromNode(nodeAndClient.name, this);
+            }
+            nodeAndClient.closeNode();
+            // delete data folders now, before we start other nodes that may claim it
+            nodeAndClient.clearDataIfNeeded(callback);
+            DiscoveryNode discoveryNode = getInstanceFromNode(ClusterService.class, nodeAndClient.node()).localNode();
+            rolesOrderedByOriginalStartupOrder[nodeAndClient.nodeAndClientId] = discoveryNode.getRoles();
+            nodesByRoles.computeIfAbsent(discoveryNode.getRoles(), k -> new ArrayList<>()).add(nodeAndClient);
+        }
+        assert nodesByRoles.values().stream().collect(Collectors.summingInt(List::size)) == nodes.size();
+        // randomize start up order, but making sure that:
+        // 1) A data folder that was assigned to a data node will stay so
+        // 2) Data nodes will get the same node lock ordinal range, so custom index paths (where the ordinal is used)
+        //    will still belong to data nodes
+        for (List<NodeAndClient> sameRoleNodes : nodesByRoles.values()) {
+            Collections.shuffle(sameRoleNodes, random);
+        }
+        List<NodeAndClient> startUpOrder = new ArrayList<>();
+        for (Set roles : rolesOrderedByOriginalStartupOrder) {
+            if (roles == null) {
+                // if some nodes were stopped, we won't have a role for that ordinal
+                continue;
+            }
+            final List<NodeAndClient> nodesByRole = nodesByRoles.get(roles);
+            startUpOrder.add(nodesByRole.remove(0));
+        }
+        assert nodesByRoles.values().stream().collect(Collectors.summingInt(List::size)) == 0;
+        // do two rounds to minimize pinging (the mock zen ping pings with no delay and can create a lot of logs)
+        for (NodeAndClient nodeAndClient : startUpOrder) {
+            logger.info("resetting node [{}] ", nodeAndClient.name);
+            // we already cleared data folders, before starting nodes up
+            nodeAndClient.recreateNodeOnRestart(callback, false, autoManageMinMasterNodes ? getMinMasterNodes(getMasterNodesCount()) : -1);
+        }
+        for (NodeAndClient nodeAndClient : startUpOrder) {
+            logger.info("starting node [{}] ", nodeAndClient.name);
+            nodeAndClient.startNode();
+            if (activeDisruptionScheme != null) {
+                activeDisruptionScheme.applyToNode(nodeAndClient.name, this);
+            }
+        }
+        if (callback.validateClusterForming()) {
+            validateClusterFormed();
+        }
     }
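The shuffle in fullRestart only permutes nodes within a role group, so a data node's ordinal - and with it its node lock and data folder - is always handed to another data node. A standalone sketch of that grouping-and-shuffling step, with made-up node names:

    Map<String, List<String>> nodesByRole = new HashMap<>();
    nodesByRole.put("data", new ArrayList<>(Arrays.asList("data-0", "data-1", "data-2")));
    nodesByRole.put("master", new ArrayList<>(Arrays.asList("master-0", "master-1")));
    Random random = new Random();
    // nodes trade places only with nodes of the same role
    for (List<String> sameRoleNodes : nodesByRole.values()) {
        Collections.shuffle(sameRoleNodes, random);
    }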
@@ -1534,19 +1641,51 @@ public final class InternalTestCluster extends TestCluster {
      * Starts a node with the given settings and returns its name.
      */
     public synchronized String startNode(Settings settings) {
-        NodeAndClient buildNode = buildNode(settings);
-        buildNode.startNode();
-        publishNode(buildNode);
+        final int defaultMinMasterNodes = getMinMasterNodes(getMasterNodesCount() + (Node.NODE_MASTER_SETTING.get(settings) ? 1 : 0));
+        NodeAndClient buildNode = buildNode(settings, defaultMinMasterNodes);
+        startAndPublishNodesAndClients(Collections.singletonList(buildNode));
         return buildNode.name;
     }
+
+    /**
+     * updates the min master nodes setting in the current running cluster.
+     *
+     * @param eligibleMasterNodeCount the number of master eligible nodes to use as basis for the min master node setting
+     */
+    private int updateMinMasterNodes(int eligibleMasterNodeCount) {
+        assert autoManageMinMasterNodes;
+        final int minMasterNodes = getMinMasterNodes(eligibleMasterNodeCount);
+        if (getMasterNodesCount() > 0) {
+            // there should be at least one master to update
+            logger.debug("updating min_master_nodes to [{}]", minMasterNodes);
+            try {
+                assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(
+                    Settings.builder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), minMasterNodes)
+                ));
+            } catch (Exception e) {
+                throw new ElasticsearchException("failed to update minimum master node to [{}] (current masters [{}])", e,
+                    minMasterNodes, getMasterNodesCount());
+            }
+        }
+        return minMasterNodes;
+    }
+
+    /** calculates a min master nodes value based on the given number of master nodes */
+    private int getMinMasterNodes(int eligibleMasterNodes) {
+        return eligibleMasterNodes / 2 + 1;
+    }
+
+    private int getMasterNodesCount() {
+        return (int) nodes.values().stream().filter(n -> Node.NODE_MASTER_SETTING.get(n.node().settings())).count();
+    }
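getMinMasterNodes is the usual quorum formula, eligibleMasterNodes / 2 + 1, and it dictates the ordering used throughout this class: raise the setting before new masters start, lower it before masters stop, so a quorum stays reachable at every step. The arithmetic, worked for a few sizes:

    // same formula as getMinMasterNodes(int)
    for (int masters = 1; masters <= 5; masters++) {
        System.out.println(masters + " master-eligible nodes -> min_master_nodes = " + (masters / 2 + 1));
    }
    // growing 3 -> 5 masters: set min_master_nodes to 3 first, then start the two new nodes
    // shrinking 5 -> 3 masters: set min_master_nodes to 2 first, then stop the two nodes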
     public synchronized Async<List<String>> startMasterOnlyNodesAsync(int numNodes) {
         return startMasterOnlyNodesAsync(numNodes, Settings.EMPTY);
     }

     public synchronized Async<List<String>> startMasterOnlyNodesAsync(int numNodes, Settings settings) {
         Settings settings1 = Settings.builder().put(settings).put(Node.NODE_MASTER_SETTING.getKey(), true).put(Node.NODE_DATA_SETTING.getKey(), false).build();
-        return startNodesAsync(numNodes, settings1, Version.CURRENT);
+        return startNodesAsync(numNodes, settings1);
     }

     public synchronized Async<List<String>> startDataOnlyNodesAsync(int numNodes) {
@@ -1555,7 +1694,7 @@
     public synchronized Async<List<String>> startDataOnlyNodesAsync(int numNodes, Settings settings) {
         Settings settings1 = Settings.builder().put(settings).put(Node.NODE_MASTER_SETTING.getKey(), false).put(Node.NODE_DATA_SETTING.getKey(), true).build();
-        return startNodesAsync(numNodes, settings1, Version.CURRENT);
+        return startNodesAsync(numNodes, settings1);
     }

     public synchronized Async<String> startMasterOnlyNodeAsync() {
@@ -1564,7 +1703,7 @@
     public synchronized Async<String> startMasterOnlyNodeAsync(Settings settings) {
         Settings settings1 = Settings.builder().put(settings).put(Node.NODE_MASTER_SETTING.getKey(), true).put(Node.NODE_DATA_SETTING.getKey(), false).build();
-        return startNodeAsync(settings1, Version.CURRENT);
+        return startNodeAsync(settings1);
     }

     public synchronized String startMasterOnlyNode(Settings settings) {
@@ -1578,7 +1717,7 @@
     public synchronized Async<String> startDataOnlyNodeAsync(Settings settings) {
         Settings settings1 = Settings.builder().put(settings).put(Node.NODE_MASTER_SETTING.getKey(), false).put(Node.NODE_DATA_SETTING.getKey(), true).build();
-        return startNodeAsync(settings1, Version.CURRENT);
+        return startNodeAsync(settings1);
     }

     public synchronized String startDataOnlyNode(Settings settings) {
@@ -1590,21 +1729,25 @@
      * Starts a node in an async manner with the given settings and returns a future with its name.
      */
     public synchronized Async<String> startNodeAsync() {
-        return startNodeAsync(Settings.EMPTY, Version.CURRENT);
+        return startNodeAsync(Settings.EMPTY);
     }

     /**
      * Starts a node in an async manner with the given settings and returns a future with its name.
      */
     public synchronized Async<String> startNodeAsync(final Settings settings) {
-        return startNodeAsync(settings, Version.CURRENT);
+        final int defaultMinMasterNodes;
+        if (autoManageMinMasterNodes) {
+            int mastersDelta = Node.NODE_MASTER_SETTING.get(settings) ? 1 : 0;
+            defaultMinMasterNodes = updateMinMasterNodes(getMasterNodesCount() + mastersDelta);
+        } else {
+            defaultMinMasterNodes = -1;
+        }
+        return startNodeAsync(settings, defaultMinMasterNodes);
     }

-    /**
-     * Starts a node in an async manner with the given settings and version and returns a future with its name.
-     */
-    public synchronized Async<String> startNodeAsync(final Settings settings, final Version version) {
-        final NodeAndClient buildNode = buildNode(settings);
+    private synchronized Async<String> startNodeAsync(final Settings settings, int defaultMinMasterNodes) {
+        final NodeAndClient buildNode = buildNode(settings, defaultMinMasterNodes);
         final Future<String> submit = executor.submit(() -> {
             buildNode.startNode();
             publishNode(buildNode);
@@ -1613,27 +1756,28 @@
         return () -> submit.get();
     }

     /**
      * Starts multiple nodes in an async manner and returns a future with their names.
      */
     public synchronized Async<List<String>> startNodesAsync(final int numNodes) {
-        return startNodesAsync(numNodes, Settings.EMPTY, Version.CURRENT);
+        return startNodesAsync(numNodes, Settings.EMPTY);
     }

     /**
      * Starts multiple nodes in an async manner with the given settings and returns a future with their names.
      */
     public synchronized Async<List<String>> startNodesAsync(final int numNodes, final Settings settings) {
-        return startNodesAsync(numNodes, settings, Version.CURRENT);
-    }
-
-    /**
-     * Starts multiple nodes in an async manner with the given settings and version and returns a future with their names.
-     */
-    public synchronized Async<List<String>> startNodesAsync(final int numNodes, final Settings settings, final Version version) {
+        final int defaultMinMasterNodes;
+        if (autoManageMinMasterNodes) {
+            int mastersDelta = Node.NODE_MASTER_SETTING.get(settings) ? numNodes : 0;
+            defaultMinMasterNodes = updateMinMasterNodes(getMasterNodesCount() + mastersDelta);
+        } else {
+            defaultMinMasterNodes = -1;
+        }
         final List<Async<String>> asyncs = new ArrayList<>();
         for (int i = 0; i < numNodes; i++) {
-            asyncs.add(startNodeAsync(settings, version));
+            asyncs.add(startNodeAsync(settings, defaultMinMasterNodes));
         }
         return () -> {
@@ -1650,9 +1794,16 @@
      * The order of the node names returned matches the order of the settings provided.
      */
     public synchronized Async<List<String>> startNodesAsync(final Settings... settings) {
+        final int defaultMinMasterNodes;
+        if (autoManageMinMasterNodes) {
+            int mastersDelta = (int) Stream.of(settings).filter(Node.NODE_MASTER_SETTING::get).count();
+            defaultMinMasterNodes = updateMinMasterNodes(getMasterNodesCount() + mastersDelta);
+        } else {
+            defaultMinMasterNodes = -1;
+        }
         List<Async<String>> asyncs = new ArrayList<>();
         for (Settings setting : settings) {
-            asyncs.add(startNodeAsync(setting, Version.CURRENT));
+            asyncs.add(startNodeAsync(setting, defaultMinMasterNodes));
         }
         return () -> {
             List<String> ids = new ArrayList<>();
@@ -1683,6 +1834,11 @@
         return dataAndMasterNodes().size();
     }

+    public synchronized int numMasterNodes() {
+        return filterNodes(nodes, NodeAndClient::isMasterEligible).size();
+    }
+
     public void setDisruptionScheme(ServiceDisruptionScheme scheme) {
         clearDisruptionScheme();
         scheme.applyToCluster(this);
@@ -1887,14 +2043,8 @@
             return false;
         }

-        /**
-         * If this returns <code>false</code> the node with the given node name will not be restarted. It will be
-         * closed and removed from the cluster. Returns <code>true</code> by default.
-         */
-        public boolean doRestart(String nodeName) {
-            return true;
-        }
+        /** returns true if the restart should also validate the cluster has reformed */
+        public boolean validateClusterForming() { return true; }
     }

     public Settings getDefaultSettings() {

View File

@@ -18,11 +18,6 @@
  */
 package org.elasticsearch.test.discovery;

-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.stream.Collectors;
-
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.common.component.AbstractComponent;
@@ -32,13 +27,22 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
 import org.elasticsearch.discovery.zen.PingContextProvider;
 import org.elasticsearch.discovery.zen.ZenPing;

+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
 /**
  * A {@link ZenPing} implementation which returns results based on a static in-memory map. This allows pinging
  * to be immediate and can be used to speed up tests.
  */
 public final class MockZenPing extends AbstractComponent implements ZenPing {

-    static final Map<ClusterName, Set<MockZenPing>> activeNodesPerCluster = ConcurrentCollections.newConcurrentMap();
+    static final Map<ClusterName, Set<MockZenPing>> activeNodesPerCluster = new HashMap<>();
+
+    /** a set of the last discovered pings. used to throttle busy spinning where MockZenPing will keep returning the same results */
+    private Set<MockZenPing> lastDiscoveredPings = null;

     private volatile PingContextProvider contextProvider;
@@ -50,18 +54,34 @@ public final class MockZenPing extends AbstractComponent implements ZenPing {
     public void start(PingContextProvider contextProvider) {
         this.contextProvider = contextProvider;
         assert contextProvider != null;
-        boolean added = getActiveNodesForCurrentCluster().add(this);
-        assert added;
+        synchronized (activeNodesPerCluster) {
+            boolean added = getActiveNodesForCurrentCluster().add(this);
+            assert added;
+            activeNodesPerCluster.notifyAll();
+        }
     }

     @Override
     public void ping(PingListener listener, TimeValue timeout) {
         logger.info("pinging using mock zen ping");
-        List<PingResponse> responseList = getActiveNodesForCurrentCluster().stream()
-            .filter(p -> p != this) // remove this as pings are not expected to return the local node
-            .map(MockZenPing::getPingResponse)
-            .collect(Collectors.toList());
-        listener.onPing(responseList);
+        synchronized (activeNodesPerCluster) {
+            Set<MockZenPing> activeNodes = getActiveNodesForCurrentCluster();
+            if (activeNodes.equals(lastDiscoveredPings)) {
+                try {
+                    logger.trace("nothing has changed since the last ping. waiting for a change");
+                    activeNodesPerCluster.wait(timeout.millis());
+                } catch (InterruptedException e) {
+                }
+                activeNodes = getActiveNodesForCurrentCluster();
+            }
+            lastDiscoveredPings = activeNodes;
+            List<PingResponse> responseList = activeNodes.stream()
+                .filter(p -> p != this) // remove this as pings are not expected to return the local node
+                .map(MockZenPing::getPingResponse)
+                .collect(Collectors.toList());
+            listener.onPing(responseList);
+        }
     }
     private ClusterName getClusterName() {
@@ -74,13 +94,17 @@ public final class MockZenPing extends AbstractComponent implements ZenPing {
     }

     private Set<MockZenPing> getActiveNodesForCurrentCluster() {
+        assert Thread.holdsLock(activeNodesPerCluster);
         return activeNodesPerCluster.computeIfAbsent(getClusterName(),
             clusterName -> ConcurrentCollections.newConcurrentSet());
     }

     @Override
     public void close() {
-        boolean found = getActiveNodesForCurrentCluster().remove(this);
-        assert found;
+        synchronized (activeNodesPerCluster) {
+            boolean found = getActiveNodesForCurrentCluster().remove(this);
+            assert found;
+            activeNodesPerCluster.notifyAll();
+        }
     }
 }
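The rewritten MockZenPing is a classic guarded wait: ping() parks on the shared map until start() or close() changes the membership and calls notifyAll(). The same pattern in isolation, as a generic sketch rather than the MockZenPing API:

    import java.util.HashSet;
    import java.util.Set;

    public class GuardedMembership {
        private final Set<String> members = new HashSet<>();
        private Set<String> lastSeen = null;

        /** returns the membership, waiting up to timeoutMillis if nothing changed since the last call */
        public synchronized Set<String> awaitChange(long timeoutMillis) throws InterruptedException {
            if (members.equals(lastSeen)) {
                wait(timeoutMillis); // woken by join/leave below, or by the timeout
            }
            lastSeen = new HashSet<>(members);
            return lastSeen;
        }

        public synchronized void join(String name) {
            members.add(name);
            notifyAll();
        }

        public synchronized void leave(String name) {
            members.remove(name);
            notifyAll();
        }
    }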

View File

@@ -636,7 +636,7 @@ public class ElasticsearchAssertions {
          * a way that sucks less.
          */
         NamedWriteableRegistry registry;
-        if (ESIntegTestCase.isInternalCluster()) {
+        if (ESIntegTestCase.isInternalCluster() && ESIntegTestCase.internalCluster().size() > 0) {
             registry = ESIntegTestCase.internalCluster().getInstance(NamedWriteableRegistry.class);
         } else {
             SearchModule searchModule = new SearchModule(Settings.EMPTY, false, emptyList());

View File

@@ -19,21 +19,6 @@
  */
 package org.elasticsearch.test.test;

-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-import java.util.function.Function;
-import java.util.stream.Collectors;
-
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.LuceneTestCase;
 import org.elasticsearch.client.Client;
@@ -51,14 +36,32 @@ import org.elasticsearch.test.NodeConfigurationSource;
 import org.elasticsearch.test.discovery.TestZenDiscovery;
 import org.elasticsearch.transport.MockTcpTransportPlugin;
 import org.elasticsearch.transport.TransportSettings;
+import org.hamcrest.Matcher;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.function.Function;
+import java.util.stream.Collectors;

 import static org.elasticsearch.cluster.node.DiscoveryNode.Role.DATA;
 import static org.elasticsearch.cluster.node.DiscoveryNode.Role.INGEST;
 import static org.elasticsearch.cluster.node.DiscoveryNode.Role.MASTER;
+import static org.elasticsearch.discovery.zen.ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileExists;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileNotExists;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.hasEntry;
+import static org.hamcrest.Matchers.hasKey;
 import static org.hamcrest.Matchers.not;

 /**
@@ -81,10 +84,10 @@ public class InternalTestClusterTests extends ESTestCase {
         Path baseDir = createTempDir();
         InternalTestCluster cluster0 = new InternalTestCluster(clusterSeed, baseDir, masterNodes,
-            minNumDataNodes, maxNumDataNodes, clusterName, nodeConfigurationSource, numClientNodes,
+            randomBoolean(), minNumDataNodes, maxNumDataNodes, clusterName, nodeConfigurationSource, numClientNodes,
             enableHttpPipelining, nodePrefix, Collections.emptyList(), Function.identity());
         InternalTestCluster cluster1 = new InternalTestCluster(clusterSeed, baseDir, masterNodes,
-            minNumDataNodes, maxNumDataNodes, clusterName, nodeConfigurationSource, numClientNodes,
+            randomBoolean(), minNumDataNodes, maxNumDataNodes, clusterName, nodeConfigurationSource, numClientNodes,
             enableHttpPipelining, nodePrefix, Collections.emptyList(), Function.identity());
         // TODO: this is not ideal - we should have a way to make sure ports are initialized in the same way
         assertClusters(cluster0, cluster1, false);
@@ -116,7 +119,8 @@
     public static void assertSettings(Settings left, Settings right, boolean checkClusterUniqueSettings) {
         Set<Map.Entry<String, String>> entries0 = left.getAsMap().entrySet();
         Map<String, String> entries1 = right.getAsMap();
-        assertThat(entries0.size(), equalTo(entries1.size()));
+        assertThat("--> left:\n" + left.toDelimitedString('\n') + "\n-->right:\n" + right.toDelimitedString('\n'),
+            entries0.size(), equalTo(entries1.size()));
         for (Map.Entry<String, String> entry : entries0) {
             if (clusterUniqueSettings.contains(entry.getKey()) && checkClusterUniqueSettings == false) {
                 continue;
@@ -125,6 +129,41 @@
         }
     }

+    private void assertMMNinNodeSetting(InternalTestCluster cluster, int masterNodes) {
+        for (final String node : cluster.getNodeNames()) {
+            assertMMNinNodeSetting(node, cluster, masterNodes);
+        }
+    }
+
+    private void assertMMNinNodeSetting(String node, InternalTestCluster cluster, int masterNodes) {
+        final int minMasterNodes = masterNodes / 2 + 1;
+        final Matcher<Map<? extends String, ? extends String>> minMasterMatcher =
+            hasEntry(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), Integer.toString(minMasterNodes));
+        final Matcher<Map<? extends String, ?>> noMinMasterNodesMatcher = not(hasKey(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey()));
+        Settings nodeSettings = cluster.client(node).admin().cluster().prepareNodesInfo(node).get().getNodes().get(0).getSettings();
+        assertThat("node setting of node [" + node + "] has the wrong min_master_node setting: ["
+                + nodeSettings.get(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey()) + "]",
+            nodeSettings.getAsMap(),
+            cluster.getAutoManageMinMasterNode() ? minMasterMatcher : noMinMasterNodesMatcher);
+    }
+
+    private void assertMMNinClusterSetting(InternalTestCluster cluster, int masterNodes) {
+        final int minMasterNodes = masterNodes / 2 + 1;
+        Matcher<Map<? extends String, ? extends String>> minMasterMatcher =
+            hasEntry(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), Integer.toString(minMasterNodes));
+        Matcher<Map<? extends String, ?>> noMinMasterNodesMatcher = not(hasKey(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey()));
+        for (final String node : cluster.getNodeNames()) {
+            Settings stateSettings = cluster.client(node).admin().cluster().prepareState().setLocal(true)
+                .get().getState().getMetaData().settings();
+            assertThat("dynamic setting for node [" + node + "] has the wrong min_master_node setting : ["
+                    + stateSettings.get(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey()) + "]",
+                stateSettings.getAsMap(),
+                cluster.getAutoManageMinMasterNode() ? minMasterMatcher : noMinMasterNodesMatcher);
+        }
+    }
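The two matcher shapes these helpers rely on, hasEntry for the auto-managed case and not(hasKey(...)) for the manual one, behave as follows on a plain map (a toy illustration reusing the same Hamcrest static imports):

    Map<String, String> settingsMap = new HashMap<>();
    settingsMap.put(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), "2");
    assertThat(settingsMap, hasEntry(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), "2")); // auto-managed
    settingsMap.remove(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey());
    assertThat(settingsMap, not(hasKey(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey())));   // manual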
     public void testBeforeTest() throws Exception {
         long clusterSeed = randomLong();
         boolean masterNodes = randomBoolean();
@@ -156,11 +195,12 @@
         Path baseDir = createTempDir();
         final List<Class<? extends Plugin>> mockPlugins = Arrays.asList(MockTcpTransportPlugin.class, TestZenDiscovery.TestPlugin.class);
+        final boolean autoManageMinMasterNodes = randomBoolean();
         InternalTestCluster cluster0 = new InternalTestCluster(clusterSeed, baseDir, masterNodes,
-            minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes,
+            autoManageMinMasterNodes, minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes,
             enableHttpPipelining, nodePrefix, mockPlugins, Function.identity());
         InternalTestCluster cluster1 = new InternalTestCluster(clusterSeed, baseDir, masterNodes,
-            minNumDataNodes, maxNumDataNodes, clusterName2, nodeConfigurationSource, numClientNodes,
+            autoManageMinMasterNodes, minNumDataNodes, maxNumDataNodes, clusterName2, nodeConfigurationSource, numClientNodes,
             enableHttpPipelining, nodePrefix, mockPlugins, Function.identity());
         assertClusters(cluster0, cluster1, false);
@@ -182,6 +222,8 @@
             assertSettings(client.settings(), other.settings(), false);
         }
         assertArrayEquals(cluster0.getNodeNames(), cluster1.getNodeNames());
+        assertMMNinNodeSetting(cluster0, cluster0.numMasterNodes());
+        assertMMNinNodeSetting(cluster1, cluster0.numMasterNodes());
         cluster0.afterTest();
         cluster1.afterTest();
     } finally {
@@ -216,12 +258,15 @@
         boolean enableHttpPipelining = randomBoolean();
         String nodePrefix = "test";
         Path baseDir = createTempDir();
+        final boolean autoManageMinMasterNodes = randomBoolean();
         InternalTestCluster cluster = new InternalTestCluster(clusterSeed, baseDir, masterNodes,
-            minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes,
+            autoManageMinMasterNodes, minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes,
             enableHttpPipelining, nodePrefix, Arrays.asList(MockTcpTransportPlugin.class, TestZenDiscovery.TestPlugin.class),
             Function.identity());
         try {
             cluster.beforeTest(random(), 0.0);
+            final int originalMasterCount = cluster.numMasterNodes();
+            assertMMNinNodeSetting(cluster, originalMasterCount);
             final Map<String, Path[]> shardNodePaths = new HashMap<>();
             for (String name : cluster.getNodeNames()) {
                 shardNodePaths.put(name, getNodePaths(cluster, name));
@@ -230,7 +275,15 @@
             Path dataPath = getNodePaths(cluster, poorNode)[0];
             final Path testMarker = dataPath.resolve("testMarker");
             Files.createDirectories(testMarker);
+            int expectedMasterCount = originalMasterCount;
+            if (cluster.getInstance(ClusterService.class, poorNode).localNode().isMasterNode()) {
+                expectedMasterCount--;
+            }
             cluster.stopRandomNode(InternalTestCluster.nameFilter(poorNode));
+            if (expectedMasterCount != originalMasterCount) {
+                // check that the min_master_nodes setting was updated
+                assertMMNinClusterSetting(cluster, expectedMasterCount);
+            }
             assertFileExists(testMarker); // stopping a node half way shouldn't clean data
             final String stableNode = randomFrom(cluster.getNodeNames());
@@ -240,10 +293,17 @@
             Files.createDirectories(stableTestMarker);
             final String newNode1 = cluster.startNode();
+            expectedMasterCount++;
             assertThat(getNodePaths(cluster, newNode1)[0], equalTo(dataPath));
             assertFileExists(testMarker); // starting a node should re-use data folders and not clean it
+            if (expectedMasterCount > 1) { // if this is the first master, the cluster state settings won't have been updated yet
+                assertMMNinClusterSetting(cluster, expectedMasterCount);
+            }
+            assertMMNinNodeSetting(newNode1, cluster, expectedMasterCount);
             final String newNode2 = cluster.startNode();
+            expectedMasterCount++;
+            assertMMNinClusterSetting(cluster, expectedMasterCount);
             final Path newDataPath = getNodePaths(cluster, newNode2)[0];
             final Path newTestMarker = newDataPath.resolve("newTestMarker");
             assertThat(newDataPath, not(dataPath));
@@ -262,6 +322,7 @@
                 assertThat("data paths for " + name + " changed", getNodePaths(cluster, name),
                     equalTo(shardNodePaths.get(name)));
             }
+            assertMMNinNodeSetting(cluster, originalMasterCount);
         } finally {
             cluster.close();
@@ -280,7 +341,7 @@
     public void testDifferentRolesMaintainPathOnRestart() throws Exception {
         final Path baseDir = createTempDir();
         final int numNodes = 5;
-        InternalTestCluster cluster = new InternalTestCluster(randomLong(), baseDir, true, 0, 0, "test",
+        InternalTestCluster cluster = new InternalTestCluster(randomLong(), baseDir, true, true, 0, 0, "test",
             new NodeConfigurationSource() {
                 @Override
                 public Settings nodeSettings(int nodeOrdinal) {
@@ -301,7 +362,9 @@
         try {
             Map<DiscoveryNode.Role, Set<String>> pathsPerRole = new HashMap<>();
             for (int i = 0; i < numNodes; i++) {
-                final DiscoveryNode.Role role = randomFrom(MASTER, DiscoveryNode.Role.DATA, DiscoveryNode.Role.INGEST);
+                final DiscoveryNode.Role role = i == numNodes - 1 && pathsPerRole.containsKey(MASTER) == false ?
+                    MASTER : // last node and still no master for the cluster
+                    randomFrom(MASTER, DiscoveryNode.Role.DATA, DiscoveryNode.Role.INGEST);
                 final String node;
                 switch (role) {
                     case MASTER:
@@ -343,6 +406,59 @@
         } finally {
             cluster.close();
         }
+    }
+
+    public void testTwoNodeCluster() throws Exception {
+        final boolean autoManageMinMasterNodes = randomBoolean();
+        NodeConfigurationSource nodeConfigurationSource = new NodeConfigurationSource() {
+            @Override
+            public Settings nodeSettings(int nodeOrdinal) {
+                return Settings.builder().put(NetworkModule.HTTP_ENABLED.getKey(), false)
+                    .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2)
+                    .put(NetworkModule.TRANSPORT_TYPE_KEY, MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME)
+                    .build();
+            }
+
+            @Override
+            public Settings transportClientSettings() {
+                return Settings.builder()
+                    .put(NetworkModule.TRANSPORT_TYPE_KEY, MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME).build();
+            }
+        };
+        boolean enableHttpPipelining = randomBoolean();
+        String nodePrefix = "test";
+        Path baseDir = createTempDir();
+        InternalTestCluster cluster = new InternalTestCluster(randomLong(), baseDir, false, autoManageMinMasterNodes, 2, 2,
+            "test", nodeConfigurationSource, 0, enableHttpPipelining, nodePrefix,
+            Arrays.asList(MockTcpTransportPlugin.class, TestZenDiscovery.TestPlugin.class), Function.identity());
+        try {
+            cluster.beforeTest(random(), 0.0);
+            assertMMNinNodeSetting(cluster, 2);
+            switch (randomInt(2)) {
+                case 0:
+                    cluster.stopRandomDataNode();
+                    assertMMNinClusterSetting(cluster, 1);
+                    cluster.startNode();
+                    assertMMNinClusterSetting(cluster, 2);
+                    assertMMNinNodeSetting(cluster, 2);
+                    break;
+                case 1:
+                    cluster.rollingRestart(new InternalTestCluster.RestartCallback() {
+                        @Override
+                        public Settings onNodeStopped(String nodeName) throws Exception {
+                            assertMMNinClusterSetting(cluster, 1);
+                            return super.onNodeStopped(nodeName);
+                        }
+                    });
+                    assertMMNinClusterSetting(cluster, 2);
+                    break;
+                case 2:
+                    cluster.fullRestart();
+                    break;
+            }
+            assertMMNinNodeSetting(cluster, 2);
+        } finally {
+            cluster.close();
+        }
     }
 }