commit afacc18dcc
Merge branch 'master' into docs/completion_suggester

@@ -1450,7 +1450,6 @@
   <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]engine[/\\]MockInternalEngine.java" checks="LineLength" />
   <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]hamcrest[/\\]ElasticsearchAssertions.java" checks="LineLength" />
   <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]junit[/\\]listeners[/\\]LoggingListener.java" checks="LineLength" />
-  <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]junit[/\\]rule[/\\]RepeatOnExceptionRule.java" checks="LineLength" />
   <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]ESRestTestCase.java" checks="LineLength" />
   <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]RestTestExecutionContext.java" checks="LineLength" />
   <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]client[/\\]RestClient.java" checks="LineLength" />

@@ -1,4 +1,4 @@
-elasticsearch = 5.0.0
+elasticsearch = 5.0.0-alpha2
 lucene = 6.0.0

 # optional dependencies

@@ -68,11 +68,13 @@ public class Version {
     public static final Version V_2_3_0 = new Version(V_2_3_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
     public static final int V_2_3_1_ID = 2030199;
     public static final Version V_2_3_1 = new Version(V_2_3_1_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
     public static final int V_2_3_2_ID = 2030299;
     public static final Version V_2_3_2 = new Version(V_2_3_2_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
+    public static final int V_5_0_0_alpha1_ID = 5000001;
+    public static final Version V_5_0_0_alpha1 = new Version(V_5_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
-    public static final int V_5_0_0_ID = 5000099;
-    public static final Version V_5_0_0 = new Version(V_5_0_0_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
-    public static final Version CURRENT = V_5_0_0;
+    public static final int V_5_0_0_alpha2_ID = 5000002;
+    public static final Version V_5_0_0_alpha2 = new Version(V_5_0_0_alpha2_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
+    public static final Version CURRENT = V_5_0_0_alpha2;

     static {
         assert CURRENT.luceneVersion.equals(org.apache.lucene.util.Version.LATEST) : "Version must be upgraded to ["

@@ -85,10 +87,12 @@ public class Version {

     public static Version fromId(int id) {
         switch (id) {
-            case V_5_0_0_ID:
-                return V_5_0_0;
+            case V_5_0_0_alpha2_ID:
+                return V_5_0_0_alpha2;
+            case V_5_0_0_alpha1_ID:
+                return V_5_0_0_alpha1;
             case V_2_3_2_ID:
                 return V_2_3_2;
             case V_2_3_1_ID:
                 return V_2_3_1;
             case V_2_3_0_ID:

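For readers tracking the version bump above: Elasticsearch packs each version into a single integer, two decimal digits per component (major, minor, revision, build), where a build value of 99 marks a GA release and lower values mark pre-releases. That is why 2.3.1 is 2030199 while 5.0.0-alpha2 is 5000002. A minimal decoding sketch, my own illustration of the scheme rather than code from this commit:

    public final class VersionIdDecoder {
        // 2030199 -> "2.3.1", 5000002 -> "5.0.0 (pre-release build 2)"
        public static String decode(int id) {
            int major = (id / 1000000) % 100;
            int minor = (id / 10000) % 100;
            int revision = (id / 100) % 100;
            int build = id % 100;
            String suffix = build == 99 ? "" : " (pre-release build " + build + ")";
            return major + "." + minor + "." + revision + suffix;
        }

        public static void main(String[] args) {
            System.out.println(decode(2030199)); // 2.3.1
            System.out.println(decode(5000002)); // 5.0.0 (pre-release build 2)
        }
    }
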
@@ -397,7 +397,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
         for (ShardRoutingState s : state) {
             if (s == ShardRoutingState.UNASSIGNED) {
                 for (ShardRouting unassignedShard : unassignedShards) {
-                    if (unassignedShard.index().equals(index)) {
+                    if (unassignedShard.index().getName().equals(index)) {
                         shards.add(unassignedShard);
                     }
                 }

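Why this one-line fix matters (my reading of the change, stated as an assumption): `index()` here returns an `Index` object rather than a `String`, so comparing it directly against the `String` parameter compiles but can never be true, and unassigned shards were silently skipped. A self-contained sketch with a simplified stand-in for the `Index` type:

    // Simplified stand-in for org.elasticsearch.index.Index, illustration only.
    final class Index {
        private final String name;
        Index(String name) { this.name = name; }
        String getName() { return name; }
    }

    class RoutingComparisonDemo {
        public static void main(String[] args) {
            Index shardIndex = new Index("my-index");
            String wanted = "my-index";
            System.out.println(shardIndex.equals(wanted));            // false: an Index never equals a String
            System.out.println(shardIndex.getName().equals(wanted));  // true: compare the names instead
        }
    }
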
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search;
+
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.search.ConstantScoreWeight;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.Scorer;
+
+import java.io.IOException;
+import java.util.Set;
+
+/**
+ * A query that matches no documents and prints the reason why in the toString method.
+ */
+public class MatchNoDocsQuery extends Query {
+    /**
+     * The reason why the query does not match any document.
+     */
+    private final String reason;
+
+    public MatchNoDocsQuery(String reason) {
+        this.reason = reason;
+    }
+
+    public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
+        return new ConstantScoreWeight(this) {
+            @Override
+            public void extractTerms(Set<Term> terms) {
+            }
+
+            @Override
+            public Explanation explain(LeafReaderContext context, int doc) throws IOException {
+                return Explanation.noMatch(reason);
+            }
+
+            @Override
+            public Scorer scorer(LeafReaderContext context) throws IOException {
+                return null;
+            }
+        };
+    }
+
+    @Override
+    public String toString(String field) {
+        return "MatchNoDocsQuery[\"" + reason + "\"]";
+    }
+}

@@ -26,16 +26,12 @@ import org.elasticsearch.common.settings.Settings;
 import java.util.Arrays;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedTransferQueue;
-import java.util.concurrent.SynchronousQueue;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.stream.Collectors;

-/**
- *
- */
 public class EsExecutors {

     /**

@@ -62,16 +58,11 @@ public class EsExecutors {

     public static EsThreadPoolExecutor newScaling(String name, int min, int max, long keepAliveTime, TimeUnit unit, ThreadFactory threadFactory, ThreadContext contextHolder) {
         ExecutorScalingQueue<Runnable> queue = new ExecutorScalingQueue<>();
         // we force the execution, since we might run into concurrency issues in offer for ScalingBlockingQueue
         EsThreadPoolExecutor executor = new EsThreadPoolExecutor(name, min, max, keepAliveTime, unit, queue, threadFactory, new ForceQueuePolicy(), contextHolder);
         queue.executor = executor;
         return executor;
     }

-    public static EsThreadPoolExecutor newCached(String name, long keepAliveTime, TimeUnit unit, ThreadFactory threadFactory, ThreadContext contextHolder) {
-        return new EsThreadPoolExecutor(name, 0, Integer.MAX_VALUE, keepAliveTime, unit, new SynchronousQueue<Runnable>(), threadFactory, new EsAbortPolicy(), contextHolder);
-    }
-
     public static EsThreadPoolExecutor newFixed(String name, int size, int queueCapacity, ThreadFactory threadFactory, ThreadContext contextHolder) {
         BlockingQueue<Runnable> queue;
         if (queueCapacity < 0) {

@@ -114,6 +105,7 @@ public class EsExecutors {
     }

     static class EsThreadFactory implements ThreadFactory {

         final ThreadGroup group;
         final AtomicInteger threadNumber = new AtomicInteger(1);
         final String namePrefix;

@@ -133,6 +125,7 @@ public class EsExecutors {
             t.setDaemon(true);
             return t;
         }

     }

     /**

@@ -141,7 +134,6 @@ public class EsExecutors {
     private EsExecutors() {
     }

-
     static class ExecutorScalingQueue<E> extends LinkedTransferQueue<E> {

         ThreadPoolExecutor executor;

@@ -151,9 +143,17 @@ public class EsExecutors {

         @Override
         public boolean offer(E e) {
+            // first try to transfer to a waiting worker thread
             if (!tryTransfer(e)) {
+                // check if there might be spare capacity in the thread
+                // pool executor
                 int left = executor.getMaximumPoolSize() - executor.getCorePoolSize();
                 if (left > 0) {
+                    // reject queuing the task to force the thread pool
+                    // executor to add a worker if it can; combined
+                    // with ForceQueuePolicy, this causes the thread
+                    // pool to always scale up to max pool size and we
+                    // only queue when there is no spare capacity
                     return false;
                 } else {
                     return super.offer(e);

@@ -162,6 +162,7 @@ public class EsExecutors {
                 return true;
             }
         }
+
     }

     /**

@@ -184,4 +185,5 @@ public class EsExecutors {
             return 0;
         }
     }
+
 }

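The ExecutorScalingQueue/ForceQueuePolicy pair above exploits a ThreadPoolExecutor quirk: the pool only grows past its core size when the work queue rejects an offer. So the queue "lies" while spare threads can still be created, and the rejection handler force-queues once the pool is at its maximum. A stand-alone sketch using only JDK types (a hypothetical demo class, not Elasticsearch code):

    import java.util.concurrent.LinkedTransferQueue;
    import java.util.concurrent.RejectedExecutionHandler;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class ScalingExecutorDemo {
        static class ScalingQueue<E> extends LinkedTransferQueue<E> {
            ThreadPoolExecutor executor;

            @Override
            public boolean offer(E e) {
                if (tryTransfer(e)) {
                    return true; // handed directly to an idle worker
                }
                if (executor.getMaximumPoolSize() - executor.getCorePoolSize() > 0) {
                    return false; // pretend we are full so the pool adds a worker
                }
                return super.offer(e); // max == core: queue normally
            }
        }

        static class ForceQueuePolicy implements RejectedExecutionHandler {
            @Override
            public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
                try {
                    executor.getQueue().put(r); // the rejection was a lie; enqueue after all
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }
        }

        public static void main(String[] args) {
            ScalingQueue<Runnable> queue = new ScalingQueue<>();
            ThreadPoolExecutor executor =
                    new ThreadPoolExecutor(1, 4, 30, TimeUnit.SECONDS, queue, new ForceQueuePolicy());
            queue.executor = executor;
            for (int i = 0; i < 10; i++) {
                executor.execute(() -> System.out.println(Thread.currentThread().getName()));
            }
            executor.shutdown();
        }
    }
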
@@ -40,12 +40,9 @@ import org.elasticsearch.index.mapper.core.DateFieldMapper;
 import org.elasticsearch.index.mapper.core.KeywordFieldMapper;
 import org.elasticsearch.index.mapper.core.KeywordFieldMapper.KeywordFieldType;
 import org.elasticsearch.index.mapper.core.LegacyDateFieldMapper;
 import org.elasticsearch.index.mapper.core.LegacyDoubleFieldMapper;
 import org.elasticsearch.index.mapper.core.LegacyFloatFieldMapper;
 import org.elasticsearch.index.mapper.core.LegacyIntegerFieldMapper;
 import org.elasticsearch.index.mapper.core.LegacyLongFieldMapper;
 import org.elasticsearch.index.mapper.core.NumberFieldMapper;
 import org.elasticsearch.index.mapper.core.StringFieldMapper;
 import org.elasticsearch.index.mapper.core.StringFieldMapper.StringFieldType;
 import org.elasticsearch.index.mapper.core.TextFieldMapper;
 import org.elasticsearch.index.mapper.core.TextFieldMapper.TextFieldType;

@@ -639,7 +636,7 @@ final class DocumentParser {
     }

     private static Mapper.Builder<?, ?> newLongBuilder(String name, Version indexCreated) {
-        if (indexCreated.onOrAfter(Version.V_5_0_0)) {
+        if (indexCreated.onOrAfter(Version.V_5_0_0_alpha2)) {
             return new NumberFieldMapper.Builder(name, NumberFieldMapper.NumberType.LONG);
         } else {
             return new LegacyLongFieldMapper.Builder(name);

@@ -647,7 +644,7 @@ final class DocumentParser {
     }

     private static Mapper.Builder<?, ?> newFloatBuilder(String name, Version indexCreated) {
-        if (indexCreated.onOrAfter(Version.V_5_0_0)) {
+        if (indexCreated.onOrAfter(Version.V_5_0_0_alpha2)) {
             return new NumberFieldMapper.Builder(name, NumberFieldMapper.NumberType.FLOAT);
         } else {
             return new LegacyFloatFieldMapper.Builder(name);

@@ -655,7 +652,7 @@ final class DocumentParser {
     }

     private static Mapper.Builder<?, ?> newDateBuilder(String name, FormatDateTimeFormatter dateTimeFormatter, Version indexCreated) {
-        if (indexCreated.onOrAfter(Version.V_5_0_0)) {
+        if (indexCreated.onOrAfter(Version.V_5_0_0_alpha2)) {
             DateFieldMapper.Builder builder = new DateFieldMapper.Builder(name);
             if (dateTimeFormatter != null) {
                 builder.dateTimeFormatter(dateTimeFormatter);

@@ -139,7 +139,7 @@ public class DateFieldMapper extends FieldMapper implements AllFieldMapper.Inclu

         @Override
         public Mapper.Builder<?,?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
-            if (parserContext.indexVersionCreated().before(Version.V_5_0_0)) {
+            if (parserContext.indexVersionCreated().before(Version.V_5_0_0_alpha2)) {
                 return new LegacyDateFieldMapper.TypeParser().parse(name, node, parserContext);
             }
             Builder builder = new Builder(name);

@@ -32,12 +32,10 @@ import org.apache.lucene.util.LegacyNumericUtils;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.fieldstats.FieldStats;
 import org.elasticsearch.common.Explicit;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.Fuzziness;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.analysis.NamedAnalyzer;
 import org.elasticsearch.index.fielddata.IndexFieldData;
 import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType;
 import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData;

@@ -78,7 +76,7 @@ public class LegacyByteFieldMapper extends LegacyNumberFieldMapper {

         @Override
         public LegacyByteFieldMapper build(BuilderContext context) {
-            if (context.indexCreatedVersion().onOrAfter(Version.V_5_0_0)) {
+            if (context.indexCreatedVersion().onOrAfter(Version.V_5_0_0_alpha2)) {
                 throw new IllegalStateException("Cannot use legacy numeric types after 5.0");
             }
             setupFieldType(context);

@@ -32,7 +32,6 @@ import org.elasticsearch.Version;
 import org.elasticsearch.action.fieldstats.FieldStats;
 import org.elasticsearch.common.Explicit;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.joda.DateMathParser;
 import org.elasticsearch.common.joda.FormatDateTimeFormatter;
 import org.elasticsearch.common.joda.Joda;

@@ -116,7 +115,7 @@ public class LegacyDateFieldMapper extends LegacyNumberFieldMapper {

         @Override
         public LegacyDateFieldMapper build(BuilderContext context) {
-            if (context.indexCreatedVersion().onOrAfter(Version.V_5_0_0)) {
+            if (context.indexCreatedVersion().onOrAfter(Version.V_5_0_0_alpha2)) {
                 throw new IllegalStateException("Cannot use legacy numeric types after 5.0");
             }
             setupFieldType(context);

@@ -39,7 +39,6 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.Fuzziness;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.analysis.NamedAnalyzer;
 import org.elasticsearch.index.fielddata.IndexFieldData;
 import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType;
 import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData;

@@ -80,7 +79,7 @@ public class LegacyDoubleFieldMapper extends LegacyNumberFieldMapper {

         @Override
         public LegacyDoubleFieldMapper build(BuilderContext context) {
-            if (context.indexCreatedVersion().onOrAfter(Version.V_5_0_0)) {
+            if (context.indexCreatedVersion().onOrAfter(Version.V_5_0_0_alpha2)) {
                 throw new IllegalStateException("Cannot use legacy numeric types after 5.0");
             }
             setupFieldType(context);

@@ -34,7 +34,6 @@ import org.apache.lucene.util.NumericUtils;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.fieldstats.FieldStats;
 import org.elasticsearch.common.Explicit;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.Fuzziness;
 import org.elasticsearch.common.xcontent.XContentBuilder;

@@ -79,7 +78,7 @@ public class LegacyFloatFieldMapper extends LegacyNumberFieldMapper {

         @Override
         public LegacyFloatFieldMapper build(BuilderContext context) {
-            if (context.indexCreatedVersion().onOrAfter(Version.V_5_0_0)) {
+            if (context.indexCreatedVersion().onOrAfter(Version.V_5_0_0_alpha2)) {
                 throw new IllegalStateException("Cannot use legacy numeric types after 5.0");
             }
             setupFieldType(context);

@@ -33,13 +33,10 @@ import org.apache.lucene.util.LegacyNumericUtils;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.fieldstats.FieldStats;
 import org.elasticsearch.common.Explicit;
 import org.elasticsearch.common.Numbers;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.Fuzziness;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.analysis.NamedAnalyzer;
 import org.elasticsearch.index.fielddata.IndexFieldData;
 import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType;
 import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData;

@@ -85,7 +82,7 @@ public class LegacyIntegerFieldMapper extends LegacyNumberFieldMapper {

         @Override
         public LegacyIntegerFieldMapper build(BuilderContext context) {
-            if (context.indexCreatedVersion().onOrAfter(Version.V_5_0_0)) {
+            if (context.indexCreatedVersion().onOrAfter(Version.V_5_0_0_alpha2)) {
                 throw new IllegalStateException("Cannot use legacy numeric types after 5.0");
             }
             setupFieldType(context);

@@ -33,13 +33,10 @@ import org.apache.lucene.util.LegacyNumericUtils;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.fieldstats.FieldStats;
 import org.elasticsearch.common.Explicit;
 import org.elasticsearch.common.Numbers;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.Fuzziness;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.analysis.NamedAnalyzer;
 import org.elasticsearch.index.fielddata.IndexFieldData;
 import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType;
 import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData;

@@ -85,7 +82,7 @@ public class LegacyLongFieldMapper extends LegacyNumberFieldMapper {

         @Override
         public LegacyLongFieldMapper build(BuilderContext context) {
-            if (context.indexCreatedVersion().onOrAfter(Version.V_5_0_0)) {
+            if (context.indexCreatedVersion().onOrAfter(Version.V_5_0_0_alpha2)) {
                 throw new IllegalStateException("Cannot use legacy numeric types after 5.0");
             }
             setupFieldType(context);

@@ -33,12 +33,10 @@ import org.apache.lucene.util.LegacyNumericUtils;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.fieldstats.FieldStats;
 import org.elasticsearch.common.Explicit;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.Fuzziness;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.analysis.NamedAnalyzer;
 import org.elasticsearch.index.fielddata.IndexFieldData;
 import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType;
 import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData;

@@ -80,7 +78,7 @@ public class LegacyShortFieldMapper extends LegacyNumberFieldMapper {

         @Override
         public LegacyShortFieldMapper build(BuilderContext context) {
-            if (context.indexCreatedVersion().onOrAfter(Version.V_5_0_0)) {
+            if (context.indexCreatedVersion().onOrAfter(Version.V_5_0_0_alpha2)) {
                 throw new IllegalStateException("Cannot use legacy numeric types after 5.0");
             }
             setupFieldType(context);

@@ -25,7 +25,6 @@ import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.document.Field;
 import org.elasticsearch.Version;
 import org.elasticsearch.common.Explicit;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.index.analysis.NamedAnalyzer;

@@ -75,7 +74,7 @@ public class LegacyTokenCountFieldMapper extends LegacyIntegerFieldMapper {

         @Override
         public LegacyTokenCountFieldMapper build(BuilderContext context) {
-            if (context.indexCreatedVersion().onOrAfter(Version.V_5_0_0)) {
+            if (context.indexCreatedVersion().onOrAfter(Version.V_5_0_0_alpha2)) {
                 throw new IllegalStateException("Cannot use legacy numeric types after 5.0");
             }
             setupFieldType(context);

@@ -136,7 +136,7 @@ public class NumberFieldMapper extends FieldMapper implements AllFieldMapper.Inc

         @Override
         public Mapper.Builder<?,?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
-            if (parserContext.indexVersionCreated().before(Version.V_5_0_0)) {
+            if (parserContext.indexVersionCreated().before(Version.V_5_0_0_alpha2)) {
                 switch (type) {
                     case BYTE:
                         return new LegacyByteFieldMapper.TypeParser().parse(name, node, parserContext);

@@ -25,7 +25,6 @@ import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexOptions;
 import org.elasticsearch.Version;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.index.analysis.NamedAnalyzer;

@@ -83,7 +82,7 @@ public class TokenCountFieldMapper extends FieldMapper {
         @Override
         @SuppressWarnings("unchecked")
         public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
-            if (parserContext.indexVersionCreated().before(Version.V_5_0_0)) {
+            if (parserContext.indexVersionCreated().before(Version.V_5_0_0_alpha2)) {
                 return new LegacyTokenCountFieldMapper.TypeParser().parse(name, node, parserContext);
             }
             TokenCountFieldMapper.Builder builder = new TokenCountFieldMapper.Builder(name);

@@ -26,7 +26,6 @@ import org.elasticsearch.Version;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.common.Explicit;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.collect.Iterators;
 import org.elasticsearch.common.geo.GeoPoint;
 import org.elasticsearch.common.geo.GeoUtils;

@@ -157,7 +156,7 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr

         context.path().add(name);
         if (enableLatLon) {
-            if (context.indexCreatedVersion().before(Version.V_5_0_0)) {
+            if (context.indexCreatedVersion().before(Version.V_5_0_0_alpha2)) {
                 LegacyNumberFieldMapper.Builder<?, ?> latMapperBuilder = new LegacyDoubleFieldMapper.Builder(Names.LAT).includeInAll(false);
                 LegacyNumberFieldMapper.Builder<?, ?> lonMapperBuilder = new LegacyDoubleFieldMapper.Builder(Names.LON).includeInAll(false);
                 if (precisionStep != null) {

@@ -48,14 +48,12 @@ import org.elasticsearch.index.mapper.ParseContext;
 import org.elasticsearch.index.mapper.core.LegacyNumberFieldMapper.Defaults;
 import org.elasticsearch.index.mapper.core.TypeParsers;
 import org.elasticsearch.index.mapper.internal.AllFieldMapper;
 import org.elasticsearch.index.mapper.ip.LegacyIpFieldMapper;
 import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.search.DocValueFormat;
 import org.joda.time.DateTimeZone;

 import java.io.IOException;
 import java.net.InetAddress;
 import java.util.Arrays;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;

@@ -105,7 +103,7 @@ public class IpFieldMapper extends FieldMapper implements AllFieldMapper.Include

         @Override
         public Mapper.Builder<?,?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
-            if (parserContext.indexVersionCreated().before(Version.V_5_0_0)) {
+            if (parserContext.indexVersionCreated().before(Version.V_5_0_0_alpha2)) {
                 return new LegacyIpFieldMapper.TypeParser().parse(name, node, parserContext);
             }
             Builder builder = new Builder(name);

@@ -19,7 +19,6 @@

 package org.elasticsearch.index.mapper.ip;

 import org.apache.lucene.analysis.LegacyNumericTokenStream;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexReader;

@@ -33,7 +32,6 @@ import org.elasticsearch.Version;
 import org.elasticsearch.action.fieldstats.FieldStats;
 import org.elasticsearch.common.Explicit;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.network.Cidrs;
 import org.elasticsearch.common.network.InetAddresses;
 import org.elasticsearch.common.settings.Settings;

@@ -122,7 +120,7 @@ public class LegacyIpFieldMapper extends LegacyNumberFieldMapper {

         @Override
         public LegacyIpFieldMapper build(BuilderContext context) {
-            if (context.indexCreatedVersion().onOrAfter(Version.V_5_0_0)) {
+            if (context.indexCreatedVersion().onOrAfter(Version.V_5_0_0_alpha2)) {
                 throw new IllegalStateException("Cannot use legacy numeric types after 5.0");
             }
             setupFieldType(context);

@@ -309,12 +309,6 @@ public class DefaultSearchContext extends SearchContext {
         return this.searchType;
     }

-    @Override
-    public SearchContext searchType(SearchType searchType) {
-        this.searchType = searchType;
-        return this;
-    }
-
     @Override
     public SearchShardTarget shardTarget() {
         return this.shardTarget;

@@ -105,11 +105,6 @@ public abstract class FilteredSearchContext extends SearchContext {
         return in.searchType();
     }

-    @Override
-    public SearchContext searchType(SearchType searchType) {
-        return in.searchType(searchType);
-    }
-
     @Override
     public SearchShardTarget shardTarget() {
         return in.shardTarget();

@@ -129,8 +129,6 @@ public abstract class SearchContext implements Releasable {

     public abstract SearchType searchType();

-    public abstract SearchContext searchType(SearchType searchType);
-
     public abstract SearchShardTarget shardTarget();

     public abstract int numberOfShards();

@@ -87,11 +87,6 @@ public class SubSearchContext extends FilteredSearchContext {
         throw new UnsupportedOperationException("this context should be read only");
     }

-    @Override
-    public SearchContext searchType(SearchType searchType) {
-        throw new UnsupportedOperationException("this context should be read only");
-    }
-
     @Override
     public SearchContext queryBoost(float queryBoost) {
         throw new UnsupportedOperationException("Not supported");

@@ -91,7 +91,6 @@ public class ThreadPool extends AbstractComponent implements Closeable {
     }

     public enum ThreadPoolType {
-        CACHED("cached"),
         DIRECT("direct"),
         FIXED("fixed"),
         SCALING("scaling");

@@ -125,12 +124,12 @@ public class ThreadPool extends AbstractComponent implements Closeable {
         }
     }

-    public static Map<String, ThreadPoolType> THREAD_POOL_TYPES;
+    public static final Map<String, ThreadPoolType> THREAD_POOL_TYPES;

     static {
         HashMap<String, ThreadPoolType> map = new HashMap<>();
         map.put(Names.SAME, ThreadPoolType.DIRECT);
-        map.put(Names.GENERIC, ThreadPoolType.CACHED);
+        map.put(Names.GENERIC, ThreadPoolType.SCALING);
         map.put(Names.LISTENER, ThreadPoolType.FIXED);
         map.put(Names.GET, ThreadPoolType.FIXED);
         map.put(Names.INDEX, ThreadPoolType.FIXED);

@@ -153,33 +152,67 @@ public class ThreadPool extends AbstractComponent implements Closeable {
         executorSettings.put(name, settings);
     }

-    private static class ExecutorSettingsBuilder {
-        Map<String, String> settings = new HashMap<>();
+    private static abstract class ExecutorSettingsBuilder<T extends ExecutorSettingsBuilder<T>> {

-        public ExecutorSettingsBuilder(String name) {
-            settings.put("name", name);
-            settings.put("type", THREAD_POOL_TYPES.get(name).getType());
+        private final Settings.Builder builder;
+
+        protected ExecutorSettingsBuilder(String name, ThreadPoolType threadPoolType) {
+            if (THREAD_POOL_TYPES.get(name) != threadPoolType) {
+                throw new IllegalArgumentException("thread pool [" + name + "] must be of type [" + threadPoolType + "]");
+            }
+            builder = Settings.builder();
+            builder.put("name", name);
+            builder.put("type", threadPoolType.getType());
         }

-        public ExecutorSettingsBuilder size(int availableProcessors) {
-            return add("size", Integer.toString(availableProcessors));
-        }
-
-        public ExecutorSettingsBuilder queueSize(int queueSize) {
-            return add("queue_size", Integer.toString(queueSize));
-        }
-
-        public ExecutorSettingsBuilder keepAlive(String keepAlive) {
+        public T keepAlive(String keepAlive) {
             return add("keep_alive", keepAlive);
         }

-        private ExecutorSettingsBuilder add(String key, String value) {
-            settings.put(key, value);
-            return this;
+        public T queueSize(int queueSize) {
+            return add("queue_size", queueSize);
         }

-        public Settings build() {
-            return Settings.builder().put(settings).build();
+        protected T add(String setting, int value) {
+            return add(setting, Integer.toString(value));
         }
+
+        protected T add(String setting, String value) {
+            builder.put(setting, value);
+            @SuppressWarnings("unchecked") final T executor = (T)this;
+            return executor;
+        }
+
+        public final Settings build() { return builder.build(); }
+
     }

+    private static class FixedExecutorSettingsBuilder extends ExecutorSettingsBuilder<FixedExecutorSettingsBuilder> {
+
+        public FixedExecutorSettingsBuilder(String name) {
+            super(name, ThreadPoolType.FIXED);
+        }
+
+        public FixedExecutorSettingsBuilder size(int size) {
+            return add("size", Integer.toString(size));
+        }
+
+    }
+
+    private static class ScalingExecutorSettingsBuilder extends ExecutorSettingsBuilder<ScalingExecutorSettingsBuilder> {
+
+        public ScalingExecutorSettingsBuilder(String name) {
+            super(name, ThreadPoolType.SCALING);
+        }
+
+        public ScalingExecutorSettingsBuilder min(int min) {
+            return add("min", min);
+        }
+
+        public ScalingExecutorSettingsBuilder size(int size) {
+            return add("size", size);
+        }
+    }

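The refactor above replaces one mutable builder with an abstract, self-typed ("curiously recurring") generic builder: add(...) returns T, so fluent calls on shared methods such as keepAlive(...) preserve the concrete subclass type and its extra methods remain chainable. A minimal sketch of the idiom with illustrative names, not the actual ThreadPool classes:

    abstract class SettingsBuilder<T extends SettingsBuilder<T>> {
        protected final StringBuilder out = new StringBuilder();

        protected T add(String key, String value) {
            out.append(key).append('=').append(value).append('\n');
            @SuppressWarnings("unchecked") final T self = (T) this;
            return self;
        }

        public T keepAlive(String keepAlive) {
            return add("keep_alive", keepAlive);
        }

        public final String build() {
            return out.toString();
        }
    }

    final class FixedBuilder extends SettingsBuilder<FixedBuilder> {
        public FixedBuilder size(int size) {
            return add("size", Integer.toString(size));
        }
    }

    class BuilderDemo {
        public static void main(String[] args) {
            // keepAlive(...) returns FixedBuilder, so size(...) still chains afterwards:
            System.out.println(new FixedBuilder().keepAlive("5m").size(4).build());
        }
    }
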
@@ -215,25 +248,26 @@ public class ThreadPool extends AbstractComponent implements Closeable {
         validate(groupSettings);

         int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings);
-        int halfProcMaxAt5 = Math.min(((availableProcessors + 1) / 2), 5);
-        int halfProcMaxAt10 = Math.min(((availableProcessors + 1) / 2), 10);
+        int halfProcMaxAt5 = halfNumberOfProcessorsMaxFive(availableProcessors);
+        int halfProcMaxAt10 = halfNumberOfProcessorsMaxTen(availableProcessors);
         Map<String, Settings> defaultExecutorTypeSettings = new HashMap<>();
-        add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.GENERIC).size(4 * availableProcessors).keepAlive("30s"));
-        add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.INDEX).size(availableProcessors).queueSize(200));
-        add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.BULK).size(availableProcessors).queueSize(50));
-        add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.GET).size(availableProcessors).queueSize(1000));
-        add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.SEARCH).size(((availableProcessors * 3) / 2) + 1).queueSize(1000));
-        add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.MANAGEMENT).size(5).keepAlive("5m"));
+        int genericThreadPoolMax = boundedBy(4 * availableProcessors, 128, 512);
+        add(defaultExecutorTypeSettings, new ScalingExecutorSettingsBuilder(Names.GENERIC).min(4).size(genericThreadPoolMax).keepAlive("30s"));
+        add(defaultExecutorTypeSettings, new FixedExecutorSettingsBuilder(Names.INDEX).size(availableProcessors).queueSize(200));
+        add(defaultExecutorTypeSettings, new FixedExecutorSettingsBuilder(Names.BULK).size(availableProcessors).queueSize(50));
+        add(defaultExecutorTypeSettings, new FixedExecutorSettingsBuilder(Names.GET).size(availableProcessors).queueSize(1000));
+        add(defaultExecutorTypeSettings, new FixedExecutorSettingsBuilder(Names.SEARCH).size(((availableProcessors * 3) / 2) + 1).queueSize(1000));
+        add(defaultExecutorTypeSettings, new ScalingExecutorSettingsBuilder(Names.MANAGEMENT).min(1).size(5).keepAlive("5m"));
         // no queue as this means clients will need to handle rejections on listener queue even if the operation succeeded
         // the assumption here is that the listeners should be very lightweight on the listeners side
-        add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.LISTENER).size(halfProcMaxAt10));
-        add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.FLUSH).size(halfProcMaxAt5).keepAlive("5m"));
-        add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.REFRESH).size(halfProcMaxAt10).keepAlive("5m"));
-        add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.WARMER).size(halfProcMaxAt5).keepAlive("5m"));
-        add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.SNAPSHOT).size(halfProcMaxAt5).keepAlive("5m"));
-        add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.FORCE_MERGE).size(1));
-        add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.FETCH_SHARD_STARTED).size(availableProcessors * 2).keepAlive("5m"));
-        add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.FETCH_SHARD_STORE).size(availableProcessors * 2).keepAlive("5m"));
+        add(defaultExecutorTypeSettings, new FixedExecutorSettingsBuilder(Names.LISTENER).size(halfProcMaxAt10));
+        add(defaultExecutorTypeSettings, new ScalingExecutorSettingsBuilder(Names.FLUSH).min(1).size(halfProcMaxAt5).keepAlive("5m"));
+        add(defaultExecutorTypeSettings, new ScalingExecutorSettingsBuilder(Names.REFRESH).min(1).size(halfProcMaxAt10).keepAlive("5m"));
+        add(defaultExecutorTypeSettings, new ScalingExecutorSettingsBuilder(Names.WARMER).min(1).size(halfProcMaxAt5).keepAlive("5m"));
+        add(defaultExecutorTypeSettings, new ScalingExecutorSettingsBuilder(Names.SNAPSHOT).min(1).size(halfProcMaxAt5).keepAlive("5m"));
+        add(defaultExecutorTypeSettings, new FixedExecutorSettingsBuilder(Names.FORCE_MERGE).size(1));
+        add(defaultExecutorTypeSettings, new ScalingExecutorSettingsBuilder(Names.FETCH_SHARD_STARTED).min(1).size(availableProcessors * 2).keepAlive("5m"));
+        add(defaultExecutorTypeSettings, new ScalingExecutorSettingsBuilder(Names.FETCH_SHARD_STORE).min(1).size(availableProcessors * 2).keepAlive("5m"));

         this.defaultExecutorTypeSettings = unmodifiableMap(defaultExecutorTypeSettings);

@@ -251,9 +285,6 @@ public class ThreadPool extends AbstractComponent implements Closeable {
         }

         executors.put(Names.SAME, new ExecutorHolder(DIRECT_EXECUTOR, new Info(Names.SAME, ThreadPoolType.DIRECT)));
-        if (!executors.get(Names.GENERIC).info.getThreadPoolType().equals(ThreadPoolType.CACHED)) {
-            throw new IllegalArgumentException("generic thread pool must be of type cached");
-        }
         this.executors = unmodifiableMap(executors);
         this.scheduler = new ScheduledThreadPoolExecutor(1, EsExecutors.daemonThreadFactory(settings, "scheduler"), new EsAbortPolicy());
         this.scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);

@@ -447,49 +478,23 @@ public class ThreadPool extends AbstractComponent implements Closeable {
         ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(this.settings, name);
         if (ThreadPoolType.DIRECT == threadPoolType) {
             if (previousExecutorHolder != null) {
-                logger.debug("updating thread_pool [{}], type [{}]", name, type);
+                logger.debug("updating thread pool [{}], type [{}]", name, type);
             } else {
-                logger.debug("creating thread_pool [{}], type [{}]", name, type);
+                logger.debug("creating thread pool [{}], type [{}]", name, type);
             }
             return new ExecutorHolder(DIRECT_EXECUTOR, new Info(name, threadPoolType));
-        } else if (ThreadPoolType.CACHED == threadPoolType) {
-            if (!Names.GENERIC.equals(name)) {
-                throw new IllegalArgumentException("thread pool type cached is reserved only for the generic thread pool and can not be applied to [" + name + "]");
-            }
-            TimeValue defaultKeepAlive = defaultSettings.getAsTime("keep_alive", timeValueMinutes(5));
-            if (previousExecutorHolder != null) {
-                if (ThreadPoolType.CACHED == previousInfo.getThreadPoolType()) {
-                    TimeValue updatedKeepAlive = settings.getAsTime("keep_alive", previousInfo.getKeepAlive());
-                    if (!previousInfo.getKeepAlive().equals(updatedKeepAlive)) {
-                        logger.debug("updating thread_pool [{}], type [{}], keep_alive [{}]", name, type, updatedKeepAlive);
-                        ((EsThreadPoolExecutor) previousExecutorHolder.executor()).setKeepAliveTime(updatedKeepAlive.millis(), TimeUnit.MILLISECONDS);
-                        return new ExecutorHolder(previousExecutorHolder.executor(), new Info(name, threadPoolType, -1, -1, updatedKeepAlive, null));
-                    }
-                    return previousExecutorHolder;
-                }
-                if (previousInfo.getKeepAlive() != null) {
-                    defaultKeepAlive = previousInfo.getKeepAlive();
-                }
-            }
-            TimeValue keepAlive = settings.getAsTime("keep_alive", defaultKeepAlive);
-            if (previousExecutorHolder != null) {
-                logger.debug("updating thread_pool [{}], type [{}], keep_alive [{}]", name, type, keepAlive);
-            } else {
-                logger.debug("creating thread_pool [{}], type [{}], keep_alive [{}]", name, type, keepAlive);
-            }
-            Executor executor = EsExecutors.newCached(name, keepAlive.millis(), TimeUnit.MILLISECONDS, threadFactory, threadContext);
-            return new ExecutorHolder(executor, new Info(name, threadPoolType, -1, -1, keepAlive, null));
         } else if (ThreadPoolType.FIXED == threadPoolType) {
             int defaultSize = defaultSettings.getAsInt("size", EsExecutors.boundedNumberOfProcessors(settings));
             SizeValue defaultQueueSize = getAsSizeOrUnbounded(defaultSettings, "queue", getAsSizeOrUnbounded(defaultSettings, "queue_size", null));

             if (previousExecutorHolder != null) {
                 assert previousInfo != null;
                 if (ThreadPoolType.FIXED == previousInfo.getThreadPoolType()) {
                     SizeValue updatedQueueSize = getAsSizeOrUnbounded(settings, "capacity", getAsSizeOrUnbounded(settings, "queue", getAsSizeOrUnbounded(settings, "queue_size", previousInfo.getQueueSize())));
                     if (Objects.equals(previousInfo.getQueueSize(), updatedQueueSize)) {
                         int updatedSize = applyHardSizeLimit(name, settings.getAsInt("size", previousInfo.getMax()));
                         if (previousInfo.getMax() != updatedSize) {
-                            logger.debug("updating thread_pool [{}], type [{}], size [{}], queue_size [{}]", name, type, updatedSize, updatedQueueSize);
+                            logger.debug("updating thread pool [{}], type [{}], size [{}], queue_size [{}]", name, type, updatedSize, updatedQueueSize);
                             // if you think this code is crazy: that's because it is!
                             if (updatedSize > previousInfo.getMax()) {
                                 ((EsThreadPoolExecutor) previousExecutorHolder.executor()).setMaximumPoolSize(updatedSize);

@@ -511,20 +516,24 @@ public class ThreadPool extends AbstractComponent implements Closeable {

             int size = applyHardSizeLimit(name, settings.getAsInt("size", defaultSize));
             SizeValue queueSize = getAsSizeOrUnbounded(settings, "capacity", getAsSizeOrUnbounded(settings, "queue", getAsSizeOrUnbounded(settings, "queue_size", defaultQueueSize)));
-            logger.debug("creating thread_pool [{}], type [{}], size [{}], queue_size [{}]", name, type, size, queueSize);
+            logger.debug("creating thread pool [{}], type [{}], size [{}], queue_size [{}]", name, type, size, queueSize);
             Executor executor = EsExecutors.newFixed(name, size, queueSize == null ? -1 : (int) queueSize.singles(), threadFactory, threadContext);
             return new ExecutorHolder(executor, new Info(name, threadPoolType, size, size, null, queueSize));
         } else if (ThreadPoolType.SCALING == threadPoolType) {
             TimeValue defaultKeepAlive = defaultSettings.getAsTime("keep_alive", timeValueMinutes(5));
             int defaultMin = defaultSettings.getAsInt("min", 1);
             int defaultSize = defaultSettings.getAsInt("size", EsExecutors.boundedNumberOfProcessors(settings));
+            final Integer queueSize = settings.getAsInt("queue_size", defaultSettings.getAsInt("queue_size", null));
+            if (queueSize != null) {
+                throw new IllegalArgumentException("thread pool [" + name + "] of type scaling can not have its queue re-sized but was [" + queueSize + "]");
+            }
             if (previousExecutorHolder != null) {
                 if (ThreadPoolType.SCALING == previousInfo.getThreadPoolType()) {
                     TimeValue updatedKeepAlive = settings.getAsTime("keep_alive", previousInfo.getKeepAlive());
                     int updatedMin = settings.getAsInt("min", previousInfo.getMin());
                     int updatedSize = settings.getAsInt("max", settings.getAsInt("size", previousInfo.getMax()));
                     if (!previousInfo.getKeepAlive().equals(updatedKeepAlive) || previousInfo.getMin() != updatedMin || previousInfo.getMax() != updatedSize) {
-                        logger.debug("updating thread_pool [{}], type [{}], keep_alive [{}]", name, type, updatedKeepAlive);
+                        logger.debug("updating thread pool [{}], type [{}], keep_alive [{}]", name, type, updatedKeepAlive);
                         if (!previousInfo.getKeepAlive().equals(updatedKeepAlive)) {
                             ((EsThreadPoolExecutor) previousExecutorHolder.executor()).setKeepAliveTime(updatedKeepAlive.millis(), TimeUnit.MILLISECONDS);
                         }

@@ -552,9 +561,9 @@ public class ThreadPool extends AbstractComponent implements Closeable {
             int min = settings.getAsInt("min", defaultMin);
             int size = settings.getAsInt("max", settings.getAsInt("size", defaultSize));
             if (previousExecutorHolder != null) {
-                logger.debug("updating thread_pool [{}], type [{}], min [{}], size [{}], keep_alive [{}]", name, type, min, size, keepAlive);
+                logger.debug("updating thread pool [{}], type [{}], min [{}], size [{}], keep_alive [{}]", name, type, min, size, keepAlive);
             } else {
-                logger.debug("creating thread_pool [{}], type [{}], min [{}], size [{}], keep_alive [{}]", name, type, min, size, keepAlive);
+                logger.debug("creating thread pool [{}], type [{}], min [{}], size [{}], keep_alive [{}]", name, type, min, size, keepAlive);
             }
             Executor executor = EsExecutors.newScaling(name, min, size, keepAlive.millis(), TimeUnit.MILLISECONDS, threadFactory, threadContext);
             return new ExecutorHolder(executor, new Info(name, threadPoolType, min, size, keepAlive, null));

@@ -577,6 +586,32 @@ public class ThreadPool extends AbstractComponent implements Closeable {
         return size;
     }

+    /**
+     * Constrains a value between minimum and maximum values
+     * (inclusive).
+     *
+     * @param value the value to constrain
+     * @param min the minimum acceptable value
+     * @param max the maximum acceptable value
+     * @return min if value is less than min, max if value is greater
+     * than value, otherwise value
+     */
+    static int boundedBy(int value, int min, int max) {
+        return Math.min(max, Math.max(min, value));
+    }
+
+    static int halfNumberOfProcessorsMaxFive(int numberOfProcessors) {
+        return boundedBy((numberOfProcessors + 1) / 2, 1, 5);
+    }
+
+    static int halfNumberOfProcessorsMaxTen(int numberOfProcessors) {
+        return boundedBy((numberOfProcessors + 1) / 2, 1, 10);
+    }
+
+    static int twiceNumberOfProcessors(int numberOfProcessors) {
+        return boundedBy(2 * numberOfProcessors, 2, Integer.MAX_VALUE);
+    }
+
     private void updateSettings(Settings settings) {
         Map<String, Settings> groupSettings = settings.getAsGroups();
         if (groupSettings.isEmpty()) {

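A worked example of the sizing helpers above for an eight-processor node; the values are computed from the formulas in this hunk, not quoted from documentation:

    class PoolSizingDemo {
        static int boundedBy(int value, int min, int max) {
            return Math.min(max, Math.max(min, value));
        }

        public static void main(String[] args) {
            int processors = 8;
            System.out.println(boundedBy((processors + 1) / 2, 1, 5));            // 4:   flush/warmer/snapshot max
            System.out.println(boundedBy((processors + 1) / 2, 1, 10));           // 4:   refresh max
            System.out.println(boundedBy(2 * processors, 2, Integer.MAX_VALUE));  // 16:  fetch_shard_* max
            System.out.println(boundedBy(4 * processors, 128, 512));              // 128: generic max
        }
    }
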
@@ -969,4 +1004,5 @@ public class ThreadPool extends AbstractComponent implements Closeable {
     public ThreadContext getThreadContext() {
         return threadContext;
     }

 }

@@ -50,8 +50,7 @@ public class BulkProcessorRetryIT extends ESIntegTestCase {
         //Have very low pool and queue sizes to overwhelm internal pools easily
         return Settings.builder()
                 .put(super.nodeSettings(nodeOrdinal))
-                .put("threadpool.generic.size", 1)
-                .put("threadpool.generic.queue_size", 1)
+                .put("threadpool.generic.max", 4)
                 // don't mess with this one! It's quite sensitive to a low queue size
                 // (see also ThreadedActionListener which is happily spawning threads even when we already got rejected)
                 //.put("threadpool.listener.queue_size", 1)

@@ -0,0 +1,78 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.test.ESTestCase;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class MatchNoDocsQueryTests extends ESTestCase {
+    public void testSimple() throws Exception {
+        MatchNoDocsQuery query = new MatchNoDocsQuery("field 'title' not found");
+        assertThat(query.toString(), equalTo("MatchNoDocsQuery[\"field 'title' not found\"]"));
+        Query rewrite = query.rewrite(null);
+        assertTrue(rewrite instanceof MatchNoDocsQuery);
+        assertThat(rewrite.toString(), equalTo("MatchNoDocsQuery[\"field 'title' not found\"]"));
+    }
+
+    public void testSearch() throws Exception {
+        IndexWriter writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
+        Document doc = new Document();
+        doc.add(new Field("field", "aaa bbb ccc ddd", TextField.TYPE_NOT_STORED));
+        writer.addDocument(doc);
+        IndexReader reader = DirectoryReader.open(writer);
+        IndexSearcher searcher = new IndexSearcher(reader);
+
+        Query query = new MatchNoDocsQuery("field not found");
+        assertThat(searcher.count(query), equalTo(0));
+
+        BooleanQuery.Builder bq = new BooleanQuery.Builder();
+        bq.add(new BooleanClause(new TermQuery(new Term("field", "aaa")), BooleanClause.Occur.SHOULD));
+        bq.add(new BooleanClause(new MatchNoDocsQuery("field not found"), BooleanClause.Occur.MUST));
+        query = bq.build();
+        assertThat(searcher.count(query), equalTo(0));
+        assertThat(query.toString(), equalTo("field:aaa +MatchNoDocsQuery[\"field not found\"]"));
+
+        bq = new BooleanQuery.Builder();
+        bq.add(new BooleanClause(new TermQuery(new Term("field", "aaa")), BooleanClause.Occur.SHOULD));
+        bq.add(new BooleanClause(new MatchNoDocsQuery("field not found"), BooleanClause.Occur.SHOULD));
+        query = bq.build();
+        assertThat(query.toString(), equalTo("field:aaa MatchNoDocsQuery[\"field not found\"]"));
+        assertThat(searcher.count(query), equalTo(1));
+        Query rewrite = query.rewrite(reader);
+        assertThat(rewrite.toString(), equalTo("field:aaa MatchNoDocsQuery[\"field not found\"]"));
+    }
+}

@@ -429,7 +429,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
                 .endObject()
                 .bytes());

-        if (version.onOrAfter(Version.V_5_0_0)) {
+        if (version.onOrAfter(Version.V_5_0_0_alpha2)) {
             assertThat(doc.rootDoc().getFields("point.lat").length, equalTo(4));
             assertThat(doc.rootDoc().getFields("point.lon").length, equalTo(4));

@@ -538,7 +538,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
                 .endObject()
                 .bytes());

-        if (version.before(Version.V_5_0_0)) {
+        if (version.before(Version.V_5_0_0_alpha2)) {
             assertThat(doc.rootDoc().getFields("point.lat").length, equalTo(2));
             assertThat(doc.rootDoc().getFields("point.lon").length, equalTo(2));
             assertThat(doc.rootDoc().getFields("point.lat")[0].numericValue().doubleValue(), equalTo(1.2));

@@ -660,7 +660,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
                 .endObject()
                 .bytes());

-        if (version.before(Version.V_5_0_0)) {
+        if (version.before(Version.V_5_0_0_alpha2)) {
             assertThat(doc.rootDoc().getFields("point.lat").length, equalTo(2));
             assertThat(doc.rootDoc().getFields("point.lon").length, equalTo(2));
             assertThat(doc.rootDoc().getFields("point.lat")[0].numericValue().doubleValue(), equalTo(1.2));

@@ -144,7 +144,7 @@ public class FuzzyQueryBuilderTests extends AbstractQueryTestCase<FuzzyQueryBuil
                 "}\n";
         Query parsedQuery = parseQuery(query).toQuery(createShardContext());
         Query expected;
-        if (getIndexVersionCreated().onOrAfter(Version.V_5_0_0)) {
+        if (getIndexVersionCreated().onOrAfter(Version.V_5_0_0_alpha2)) {
             expected = IntPoint.newRangeQuery(INT_FIELD_NAME, 7, 17);
         } else {
             expected = LegacyNumericRangeQuery.newIntRange(INT_FIELD_NAME, 7, 17, true, true);

@@ -396,7 +396,7 @@ public class QueryStringQueryBuilderTests extends AbstractQueryTestCase<QueryStr
     public void testToQueryNumericRangeQuery() throws Exception {
         assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0);
         Query query = queryStringQuery("12~0.2").defaultField(INT_FIELD_NAME).toQuery(createShardContext());
-        if (getIndexVersionCreated().onOrAfter(Version.V_5_0_0)) {
+        if (getIndexVersionCreated().onOrAfter(Version.V_5_0_0_alpha2)) {
             assertEquals(IntPoint.newExactQuery(INT_FIELD_NAME, 12), query);
         } else {
             LegacyNumericRangeQuery fuzzyQuery = (LegacyNumericRangeQuery) query;

@@ -0,0 +1,62 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.threadpool;
+
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.Map;
+import java.util.stream.Collectors;
+
+public abstract class ESThreadPoolTestCase extends ESTestCase {
+
+    protected final ThreadPool.Info info(final ThreadPool threadPool, final String name) {
+        for (final ThreadPool.Info info : threadPool.info()) {
+            if (info.getName().equals(name)) {
+                return info;
+            }
+        }
+        throw new IllegalArgumentException(name);
+    }
+
+    protected final ThreadPoolStats.Stats stats(final ThreadPool threadPool, final String name) {
+        for (final ThreadPoolStats.Stats stats : threadPool.stats()) {
+            if (name.equals(stats.getName())) {
+                return stats;
+            }
+        }
+        throw new IllegalArgumentException(name);
+    }
+
+    protected final void terminateThreadPoolIfNeeded(final ThreadPool threadPool) throws InterruptedException {
+        if (threadPool != null) {
+            terminate(threadPool);
+        }
+    }
+
+    static String randomThreadPool(final ThreadPool.ThreadPoolType type) {
+        return randomFrom(
+                ThreadPool.THREAD_POOL_TYPES
+                        .entrySet().stream()
+                        .filter(t -> t.getValue().equals(type))
+                        .map(Map.Entry::getKey)
+                        .collect(Collectors.toList()));
+    }
+
+}

@ -0,0 +1,245 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.threadpool;

import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor;

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;

import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasToString;

public class ScalingThreadPoolTests extends ESThreadPoolTestCase {

    public void testScalingThreadPoolConfiguration() throws InterruptedException {
        final String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.SCALING);
        final Settings.Builder builder = Settings.builder();

        final int min;
        if (randomBoolean()) {
            min = randomIntBetween(0, 8);
            builder.put("threadpool." + threadPoolName + ".min", min);
        } else {
            min = "generic".equals(threadPoolName) ? 4 : 1; // the defaults
        }

        final int sizeBasedOnNumberOfProcessors;
        if (randomBoolean()) {
            final int processors = randomIntBetween(1, 64);
            sizeBasedOnNumberOfProcessors = expectedSize(threadPoolName, processors);
            builder.put("processors", processors);
        } else {
            sizeBasedOnNumberOfProcessors = expectedSize(threadPoolName, Math.min(32, Runtime.getRuntime().availableProcessors()));
        }

        final int expectedSize;
        if (sizeBasedOnNumberOfProcessors < min || randomBoolean()) {
            expectedSize = randomIntBetween(min, 16);
            builder.put("threadpool." + threadPoolName + ".size", expectedSize);
        } else {
            expectedSize = sizeBasedOnNumberOfProcessors;
        }

        final long keepAlive;
        if (randomBoolean()) {
            keepAlive = randomIntBetween(1, 300);
            builder.put("threadpool." + threadPoolName + ".keep_alive", keepAlive + "s");
        } else {
            keepAlive = "generic".equals(threadPoolName) ? 30 : 300; // the defaults
        }

        runScalingThreadPoolTest(builder.build(), (clusterSettings, threadPool) -> {
            final Executor executor = threadPool.executor(threadPoolName);
            assertThat(executor, instanceOf(EsThreadPoolExecutor.class));
            final EsThreadPoolExecutor esThreadPoolExecutor = (EsThreadPoolExecutor)executor;
            final ThreadPool.Info info = info(threadPool, threadPoolName);

            assertThat(info.getName(), equalTo(threadPoolName));
            assertThat(info.getThreadPoolType(), equalTo(ThreadPool.ThreadPoolType.SCALING));

            assertThat(info.getKeepAlive().seconds(), equalTo(keepAlive));
            assertThat(esThreadPoolExecutor.getKeepAliveTime(TimeUnit.SECONDS), equalTo(keepAlive));

            assertNull(info.getQueueSize());
            assertThat(esThreadPoolExecutor.getQueue().remainingCapacity(), equalTo(Integer.MAX_VALUE));

            assertThat(info.getMin(), equalTo(min));
            assertThat(esThreadPoolExecutor.getCorePoolSize(), equalTo(min));
            assertThat(info.getMax(), equalTo(expectedSize));
            assertThat(esThreadPoolExecutor.getMaximumPoolSize(), equalTo(expectedSize));
        });
    }

    @FunctionalInterface
    private interface SizeFunction {
        int size(int numberOfProcessors);
    }

    private int expectedSize(final String threadPoolName, final int numberOfProcessors) {
        final Map<String, SizeFunction> sizes = new HashMap<>();
        sizes.put(ThreadPool.Names.GENERIC, n -> ThreadPool.boundedBy(4 * n, 128, 512));
        sizes.put(ThreadPool.Names.MANAGEMENT, n -> 5);
        sizes.put(ThreadPool.Names.FLUSH, ThreadPool::halfNumberOfProcessorsMaxFive);
        sizes.put(ThreadPool.Names.REFRESH, ThreadPool::halfNumberOfProcessorsMaxTen);
        sizes.put(ThreadPool.Names.WARMER, ThreadPool::halfNumberOfProcessorsMaxFive);
        sizes.put(ThreadPool.Names.SNAPSHOT, ThreadPool::halfNumberOfProcessorsMaxFive);
        sizes.put(ThreadPool.Names.FETCH_SHARD_STARTED, ThreadPool::twiceNumberOfProcessors);
        sizes.put(ThreadPool.Names.FETCH_SHARD_STORE, ThreadPool::twiceNumberOfProcessors);
        return sizes.get(threadPoolName).size(numberOfProcessors);
    }

    public void testValidDynamicKeepAlive() throws InterruptedException {
        final String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.SCALING);
        runScalingThreadPoolTest(Settings.EMPTY, (clusterSettings, threadPool) -> {
            final Executor beforeExecutor = threadPool.executor(threadPoolName);
            final long seconds = randomIntBetween(1, 300);
            clusterSettings.applySettings(settings("threadpool." + threadPoolName + ".keep_alive", seconds + "s"));
            final Executor afterExecutor = threadPool.executor(threadPoolName);
            assertSame(beforeExecutor, afterExecutor);
            final ThreadPool.Info info = info(threadPool, threadPoolName);
            assertThat(info.getKeepAlive().seconds(), equalTo(seconds));
        });
    }

    public void testScalingThreadPoolIsBounded() throws InterruptedException {
        final String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.SCALING);
        final int size = randomIntBetween(32, 512);
        final Settings settings = Settings.builder().put("threadpool." + threadPoolName + ".size", size).build();
        runScalingThreadPoolTest(settings, (clusterSettings, threadPool) -> {
            final CountDownLatch latch = new CountDownLatch(1);
            final int numberOfTasks = 2 * size;
            final CountDownLatch taskLatch = new CountDownLatch(numberOfTasks);
            for (int i = 0; i < numberOfTasks; i++) {
                threadPool.executor(threadPoolName).execute(() -> {
                    try {
                        latch.await();
                        taskLatch.countDown();
                    } catch (final InterruptedException e) {
                        throw new RuntimeException(e);
                    }
                });
            }
            final ThreadPoolStats.Stats stats = stats(threadPool, threadPoolName);
            assertThat(stats.getQueue(), equalTo(numberOfTasks - size));
            assertThat(stats.getLargest(), equalTo(size));
            latch.countDown();
            try {
                taskLatch.await();
            } catch (InterruptedException e) {
                throw new RuntimeException(e);
            }
        });
    }

    public void testScalingThreadPoolThreadsAreTerminatedAfterKeepAlive() throws InterruptedException {
        final String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.SCALING);
        final Settings settings =
            Settings.builder()
                .put("threadpool." + threadPoolName + ".size", 128)
                .put("threadpool." + threadPoolName + ".keep_alive", "1ms")
                .build();
        runScalingThreadPoolTest(settings, ((clusterSettings, threadPool) -> {
            final CountDownLatch latch = new CountDownLatch(1);
            for (int i = 0; i < 128; i++) {
                threadPool.executor(threadPoolName).execute(() -> {
                    try {
                        latch.await();
                    } catch (final InterruptedException e) {
                        throw new RuntimeException(e);
                    }
                });
            }
            final int active = stats(threadPool, threadPoolName).getThreads();
            assertThat(active, equalTo(128));
            latch.countDown();
            do {
                spinForAtLeastOneMillisecond();
            } while (stats(threadPool, threadPoolName).getThreads() > 4);
            assertThat(stats(threadPool, threadPoolName).getCompleted(), equalTo(128L));
        }));
    }

    public void testDynamicThreadPoolSize() throws InterruptedException {
        final String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.SCALING);
        runScalingThreadPoolTest(Settings.EMPTY, (clusterSettings, threadPool) -> {
            final Executor beforeExecutor = threadPool.executor(threadPoolName);
            int expectedMin = "generic".equals(threadPoolName) ? 4 : 1;
            final int size = randomIntBetween(expectedMin, Integer.MAX_VALUE);
            clusterSettings.applySettings(settings("threadpool." + threadPoolName + ".size", size));
            final Executor afterExecutor = threadPool.executor(threadPoolName);
            assertSame(beforeExecutor, afterExecutor);
            final ThreadPool.Info info = info(threadPool, threadPoolName);
            assertThat(info.getMin(), equalTo(expectedMin));
            assertThat(info.getMax(), equalTo(size));

            assertThat(afterExecutor, instanceOf(EsThreadPoolExecutor.class));
            final EsThreadPoolExecutor executor = (EsThreadPoolExecutor)afterExecutor;
            assertThat(executor.getCorePoolSize(), equalTo(expectedMin));
            assertThat(executor.getMaximumPoolSize(), equalTo(size));
        });
    }

    public void testResizingScalingThreadPoolQueue() throws InterruptedException {
        final String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.SCALING);
        runScalingThreadPoolTest(Settings.EMPTY, (clusterSettings, threadPool) -> {
            final int size = randomIntBetween(1, Integer.MAX_VALUE);
            final IllegalArgumentException e = expectThrows(
                IllegalArgumentException.class,
                () -> clusterSettings.applySettings(settings("threadpool." + threadPoolName + ".queue_size", size)));
            assertThat(e, hasToString(
                "java.lang.IllegalArgumentException: thread pool [" + threadPoolName +
                    "] of type scaling can not have its queue re-sized but was [" +
                    size + "]"));
        });
    }

    public void runScalingThreadPoolTest(
        final Settings settings,
        final BiConsumer<ClusterSettings, ThreadPool> consumer) throws InterruptedException {
        ThreadPool threadPool = null;
        try {
            final String test = Thread.currentThread().getStackTrace()[2].getMethodName();
            final Settings nodeSettings = Settings.builder().put(settings).put("node.name", test).build();
            threadPool = new ThreadPool(nodeSettings);
            final ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
            threadPool.setClusterSettings(clusterSettings);
            consumer.accept(clusterSettings, threadPool);
        } finally {
            terminateThreadPoolIfNeeded(threadPool);
        }
    }

    private static Settings settings(final String setting, final int value) {
        return settings(setting, Integer.toString(value));
    }

    private static Settings settings(final String setting, final String value) {
        return Settings.builder().put(setting, value).build();
    }

}
@ -0,0 +1,49 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.threadpool;

import org.elasticsearch.test.ESTestCase;

import static org.hamcrest.CoreMatchers.equalTo;

public class ThreadPoolTests extends ESTestCase {

    public void testBoundedByBelowMin() {
        int min = randomIntBetween(0, 32);
        int max = randomIntBetween(min + 1, 64);
        int value = randomIntBetween(Integer.MIN_VALUE, min - 1);
        assertThat(ThreadPool.boundedBy(value, min, max), equalTo(min));
    }

    public void testBoundedByAboveMax() {
        int min = randomIntBetween(0, 32);
        int max = randomIntBetween(min + 1, 64);
        int value = randomIntBetween(max + 1, Integer.MAX_VALUE);
        assertThat(ThreadPool.boundedBy(value, min, max), equalTo(max));
    }

    public void testBoundedByBetweenMinAndMax() {
        int min = randomIntBetween(0, 32);
        int max = randomIntBetween(min + 1, 64);
        int value = randomIntBetween(min, max);
        assertThat(ThreadPool.boundedBy(value, min, max), equalTo(value));
    }

}
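Taken together, these three tests pin `ThreadPool.boundedBy` down as a plain clamp. A minimal sketch consistent with the assertions above (an assumption about the implementation, whose source is not part of this diff):

[source,java]
--------------------------------------------------
// Sketch only: clamp value into the closed interval [min, max], which is
// exactly the behavior testBoundedByBelowMin, testBoundedByAboveMax, and
// testBoundedByBetweenMinAndMax assert. Not the actual ThreadPool source.
static int boundedBy(int value, int min, int max) {
    return Math.min(max, Math.max(min, value));
}
--------------------------------------------------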
@ -23,7 +23,6 @@ import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.ThreadPool.Names;

import java.lang.reflect.Field;

@ -46,7 +45,7 @@ import static org.hamcrest.Matchers.sameInstance;

/**
 */
public class UpdateThreadPoolSettingsTests extends ESTestCase {
public class UpdateThreadPoolSettingsTests extends ESThreadPoolTestCase {

    public void testCorrectThreadPoolTypePermittedInSettings() throws InterruptedException {
        String threadPoolName = randomThreadPoolName();

@ -162,56 +161,6 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase {
        }
    }

    public void testCachedExecutorType() throws InterruptedException {
        String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.CACHED);
        ThreadPool threadPool = null;
        try {
            Settings nodeSettings = Settings.builder()
                .put("node.name", "testCachedExecutorType").build();
            threadPool = new ThreadPool(nodeSettings);
            ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
            threadPool.setClusterSettings(clusterSettings);

            assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.CACHED);
            assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class));

            Settings settings = clusterSettings.applySettings(Settings.builder()
                .put("threadpool." + threadPoolName + ".keep_alive", "10m")
                .build());
            assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.CACHED);
            assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class));
            assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getCorePoolSize(), equalTo(0));
            // Make sure keep alive value changed
            assertThat(info(threadPool, threadPoolName).getKeepAlive().minutes(), equalTo(10L));
            assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(10L));

            // Make sure keep alive value reused
            assertThat(info(threadPool, threadPoolName).getKeepAlive().minutes(), equalTo(10L));
            assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class));

            // Change keep alive
            Executor oldExecutor = threadPool.executor(threadPoolName);
            settings = clusterSettings.applySettings(Settings.builder().put(settings).put("threadpool." + threadPoolName + ".keep_alive", "1m").build());
            // Make sure keep alive value changed
            assertThat(info(threadPool, threadPoolName).getKeepAlive().minutes(), equalTo(1L));
            assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(1L));
            // Make sure executor didn't change
            assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.CACHED);
            assertThat(threadPool.executor(threadPoolName), sameInstance(oldExecutor));

            // Set the same keep alive
            settings = clusterSettings.applySettings(Settings.builder().put(settings).put("threadpool." + threadPoolName + ".keep_alive", "1m").build());
            // Make sure keep alive value didn't change
            assertThat(info(threadPool, threadPoolName).getKeepAlive().minutes(), equalTo(1L));
            assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(1L));
            // Make sure executor didn't change
            assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.CACHED);
            assertThat(threadPool.executor(threadPoolName), sameInstance(oldExecutor));
        } finally {
            terminateThreadPoolIfNeeded(threadPool);
        }
    }

    private static int getExpectedThreadPoolSize(Settings settings, String name, int size) {
        if (name.equals(ThreadPool.Names.BULK) || name.equals(ThreadPool.Names.INDEX)) {
            return Math.min(size, EsExecutors.boundedNumberOfProcessors(settings));

@ -273,7 +222,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase {
            assertThat(threadPool.executor(threadPoolName), sameInstance(oldExecutor));

            // Change queue capacity
            settings = clusterSettings.applySettings(Settings.builder().put(settings).put("threadpool." + threadPoolName + ".queue", "500")
            clusterSettings.applySettings(Settings.builder().put(settings).put("threadpool." + threadPoolName + ".queue", "500")
                .build());
        } finally {
            terminateThreadPoolIfNeeded(threadPool);

@ -290,9 +239,11 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase {
            threadPool = new ThreadPool(nodeSettings);
            ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
            threadPool.setClusterSettings(clusterSettings);
            assertThat(info(threadPool, threadPoolName).getMin(), equalTo(1));
            final int expectedMinimum = "generic".equals(threadPoolName) ? 4 : 1;
            assertThat(info(threadPool, threadPoolName).getMin(), equalTo(expectedMinimum));
            assertThat(info(threadPool, threadPoolName).getMax(), equalTo(10));
            assertThat(info(threadPool, threadPoolName).getKeepAlive().minutes(), equalTo(5L));
            final long expectedKeepAlive = "generic".equals(threadPoolName) ? 30 : 300;
            assertThat(info(threadPool, threadPoolName).getKeepAlive().seconds(), equalTo(expectedKeepAlive));
            assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.SCALING);
            assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class));

@ -358,6 +309,9 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase {
        try {
            Settings nodeSettings = Settings.builder()
                .put("threadpool.my_pool1.type", "scaling")
                .put("threadpool.my_pool1.min", 1)
                .put("threadpool.my_pool1.size", EsExecutors.boundedNumberOfProcessors(Settings.EMPTY))
                .put("threadpool.my_pool1.keep_alive", "1m")
                .put("threadpool.my_pool2.type", "fixed")
                .put("threadpool.my_pool2.size", "1")
                .put("threadpool.my_pool2.queue_size", "1")

@ -429,21 +383,6 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase {
        }
    }

    private void terminateThreadPoolIfNeeded(ThreadPool threadPool) throws InterruptedException {
        if (threadPool != null) {
            terminate(threadPool);
        }
    }

    private ThreadPool.Info info(ThreadPool threadPool, String name) {
        for (ThreadPool.Info info : threadPool.info()) {
            if (info.getName().equals(name)) {
                return info;
            }
        }
        return null;
    }

    private String randomThreadPoolName() {
        Set<String> threadPoolNames = ThreadPool.THREAD_POOL_TYPES.keySet();
        return randomFrom(threadPoolNames.toArray(new String[threadPoolNames.size()]));

@ -456,7 +395,4 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase {
        return randomFrom(set.toArray(new ThreadPool.ThreadPoolType[set.size()]));
    }

    private String randomThreadPool(ThreadPool.ThreadPoolType type) {
        return randomFrom(ThreadPool.THREAD_POOL_TYPES.entrySet().stream().filter(t -> t.getValue().equals(type)).map(Map.Entry::getKey).collect(Collectors.toList()));
    }
}
@ -2,13 +2,6 @@
#
# /etc/init.d/elasticsearch -- startup script for Elasticsearch
#
# Written by Miquel van Smoorenburg <miquels@cistron.nl>.
# Modified for Debian GNU/Linux by Ian Murdock <imurdock@gnu.ai.mit.edu>.
# Modified for Tomcat by Stefan Gybas <sgybas@debian.org>.
# Modified for Tomcat6 by Thierry Carrez <thierry.carrez@ubuntu.com>.
# Additional improvements by Jason Brittain <jason.brittain@mulesoft.com>.
# Modified by Nicolas Huray for Elasticsearch <nicolas.huray@gmail.com>.
#
### BEGIN INIT INFO
# Provides:          elasticsearch
# Required-Start:    $network $remote_fs $named

@ -193,7 +186,6 @@ case "$1" in
    restart|force-reload)
        if [ -f "$PID_FILE" ]; then
            $0 stop
            sleep 1
        fi
        $0 start
        ;;
@ -71,6 +71,11 @@ if [ -n $USER ] && [ -z $ES_USER ] ; then
    ES_USER=$USER
fi

if [ ! -x "$exec" ]; then
    echo "The elasticsearch startup script does not exist or is not executable, tried: $exec"
    exit 1
fi

checkJava() {
    if [ -x "$JAVA_HOME/bin/java" ]; then
        JAVA="$JAVA_HOME/bin/java"
@ -13,6 +13,7 @@ a number of clients that have been contributed by the community for various lang
* <<haskell>>
* <<java>>
* <<javascript>>
* <<kotlin>>
* <<dotnet>>
* <<ocaml>>
* <<perl>>
@ -104,6 +105,11 @@ The following project appears to be abandoned:
* https://github.com/ramv/node-elastical[node-elastical]:
  Node.js client for the Elasticsearch REST API

[[kotlin]]
== kotlin

* https://github.com/mbuhot/eskotlin[ES Kotlin]:
  Elasticsearch Query DSL for kotlin based on the {client}/java-api/current/index.html[official Elasticsearch Java client].

[[dotnet]]
== .NET
@ -119,7 +119,7 @@ The source `transform` feature has been removed. Instead, use an ingest pipeline

The join between parent and child documents no longer relies on indexed fields
and therefore from 5.0.0 onwards the `_parent` field is no longer indexed. In
order to find documents that referrer to a specific parent id the new
order to find documents that refer to a specific parent id, the new
`parent_id` query can be used. The GET response and hits inside the search
response still include the parent id under the `_parent` key.

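For readers migrating, a request using the new query might look like the following sketch; the index name `my_index`, the child type `my_child`, and the parent id `"1"` are placeholders for illustration, not part of this commit:

[source,js]
--------------------------------------------------
GET /my_index/_search
{
  "query": {
    "parent_id": {
      "type": "my_child",
      "id": "1"
    }
  }
}
--------------------------------------------------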
@ -29,6 +29,11 @@ node can contribute to multiple counts as it can have multiple roles. Every
node is implicitly a coordinating node, so whenever a node has no explicit
roles, it will be counted as coordinating only.

==== Removed shard `version` information from `/_cluster/state` routing table

We now store allocation ids of shards in the cluster state and use that to
select primary shards instead of the version information.

==== Node roles are not part of node attributes anymore

Node roles are now returned in a specific section, called `roles`, as part of
@ -156,8 +156,8 @@ The deprecated settings `index.cache.query.enable` and
`indices.cache.query.size` have been removed and are replaced with
`index.requests.cache.enable` and `indices.requests.cache.size` respectively.

`indices.requests.cache.clean_interval has been replaced with
`indices.cache.clean_interval and is no longer supported.
`indices.requests.cache.clean_interval` has been replaced with
`indices.cache.clean_interval` and is no longer supported.

==== Field Data Cache Settings

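As a sketch of the renamed settings in use (the value shown is illustrative, not a default):

[source,yaml]
--------------------------------------------------
# node-level setting in elasticsearch.yml (illustrative value)
indices.requests.cache.size: 2%
--------------------------------------------------

The `index.requests.cache.enable` counterpart is an index-level setting and would be applied per index rather than in `elasticsearch.yml`.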
@ -228,7 +228,7 @@ name of the setting must be prefixed with `es.`.

==== Discovery Settings

The `discovery.zen.minimum_master_node` must bet set for nodes that have
The `discovery.zen.minimum_master_node` must be set for nodes that have
`network.host`, `network.bind_host`, `network.publish_host`,
`transport.host`, `transport.bind_host`, or `transport.publish_host`
configuration options set. We see those nodes as in "production" mode
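A sketch of a node configuration that falls into this "production" mode follows; the address and node count are illustrative, and note that the setting is spelled `discovery.zen.minimum_master_nodes` in the settings reference:

[source,yaml]
--------------------------------------------------
# elasticsearch.yml (illustrative): binding to a non-loopback address puts
# the node in "production" mode, so the master quorum must be set explicitly
network.host: 192.168.1.10
# with three master-eligible nodes: (3 / 2) + 1 = 2
discovery.zen.minimum_master_nodes: 2
--------------------------------------------------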
@ -11,7 +11,7 @@ There are several thread pools, but the important ones include:

`generic`::
For generic operations (e.g., background node discovery).
Thread pool type is `cached`.
Thread pool type is `scaling`.

`index`::
For index/delete operations. Thread pool type is `fixed`
@ -72,26 +72,6 @@ NOTE: you can update thread pool settings dynamically using <<cluster-update-set

The following are the types of thread pools and their respective parameters:

[float]
==== `cached`

The `cached` thread pool is an unbounded thread pool that will spawn a
thread if there are pending requests. This thread pool is used to
prevent requests submitted to this pool from blocking or being
rejected. Unused threads in this thread pool will be terminated after
a keep alive expires (defaults to five minutes). The `cached` thread
pool is reserved for the <<modules-threadpool,`generic`>> thread pool.

The `keep_alive` parameter determines how long a thread should be kept
around in the thread pool without doing any work.

[source,js]
--------------------------------------------------
threadpool:
    generic:
        keep_alive: 2m
--------------------------------------------------

[float]
==== `fixed`

@ -118,9 +98,9 @@ threadpool:
[float]
==== `scaling`

The `scaling` thread pool holds a dynamic number of threads. This number is
proportional to the workload and varies between 1 and the value of the
`size` parameter.
The `scaling` thread pool holds a dynamic number of threads. This
number is proportional to the workload and varies between the value of
the `min` and `size` parameters.

The `keep_alive` parameter determines how long a thread should be kept
around in the thread pool without it doing any work.
@ -129,6 +109,7 @@ around in the thread pool without it doing any work.
--------------------------------------------------
threadpool:
    warmer:
        min: 1
        size: 8
        keep_alive: 2m
--------------------------------------------------
@ -89,7 +89,7 @@ public class Murmur3FieldMapper extends FieldMapper {
            throw new MapperParsingException("Setting [index] cannot be modified for field [" + name + "]");
        }

        if (parserContext.indexVersionCreated().before(Version.V_5_0_0)) {
        if (parserContext.indexVersionCreated().before(Version.V_5_0_0_alpha2)) {
            node.remove("precision_step");
        }

@ -22,7 +22,6 @@ package org.elasticsearch.index.mapper.size;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexOptions;
import org.elasticsearch.Version;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
@ -75,7 +74,7 @@ public class SizeFieldMapper extends MetadataFieldMapper {

        private Builder(MappedFieldType existing, Version indexCreated) {
            super(NAME, existing == null
                ? indexCreated.before(Version.V_5_0_0) ? Defaults.LEGACY_SIZE_FIELD_TYPE : Defaults.SIZE_FIELD_TYPE
                ? indexCreated.before(Version.V_5_0_0_alpha2) ? Defaults.LEGACY_SIZE_FIELD_TYPE : Defaults.SIZE_FIELD_TYPE
                : existing, Defaults.LEGACY_SIZE_FIELD_TYPE);
            builder = this;
        }
@ -161,7 +160,7 @@ public class SizeFieldMapper extends MetadataFieldMapper {
            return;
        }
        final int value = context.source().length();
        if (Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) {
        if (Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0_alpha2)) {
            fields.add(new LegacyIntegerFieldMapper.CustomIntegerNumericField(value, fieldType()));
        } else {
            boolean indexed = fieldType().indexOptions() != IndexOptions.NONE;
@ -18,6 +18,6 @@ integTest {
    cluster {
        numNodes = 2
        numBwcNodes = 1
        bwcVersion = "5.0.0-SNAPSHOT" // this is the same as the current version until we released the first RC
        bwcVersion = "5.0.0-alpha2-SNAPSHOT" // this is the same as the current version until we released the first RC
    }
}
@ -4,7 +4,7 @@
  "methods": [ "GET", "POST" ],
  "url": {
    "path": "/_ingest/pipeline/_simulate",
    "paths": [ "/_ingest/pipeline/_simulate", "/_ingest/pipeline/{id}/_simulate/" ],
    "paths": [ "/_ingest/pipeline/_simulate", "/_ingest/pipeline/{id}/_simulate" ],
    "parts": {
      "id": {
        "type" : "string",
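To make the two path variants concrete, requests against this endpoint might look like the sketch below; the inline pipeline body, the sample document, and the pipeline id `my-pipeline` are assumptions for illustration:

[source,js]
--------------------------------------------------
POST /_ingest/pipeline/_simulate
{
  "pipeline": {
    "processors": [ { "set": { "field": "greeting", "value": "hello" } } ]
  },
  "docs": [ { "_source": { "message": "test" } } ]
}

POST /_ingest/pipeline/my-pipeline/_simulate
{
  "docs": [ { "_source": { "message": "test" } } ]
}
--------------------------------------------------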
@ -314,7 +314,7 @@ public final class InternalTestCluster extends TestCluster {
        // always reduce this - it can make tests really slow
        builder.put(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.getKey(), TimeValue.timeValueMillis(RandomInts.randomIntBetween(random, 20, 50)));
        defaultSettings = builder.build();
        executor = EsExecutors.newCached("test runner", 0, TimeUnit.SECONDS, EsExecutors.daemonThreadFactory("test_" + clusterName), new ThreadContext(Settings.EMPTY));
        executor = EsExecutors.newScaling("test runner", 0, Integer.MAX_VALUE, 0, TimeUnit.SECONDS, EsExecutors.daemonThreadFactory("test_" + clusterName), new ThreadContext(Settings.EMPTY));
    }

    public static String configuredNodeMode() {
@ -157,11 +157,6 @@ public class TestSearchContext extends SearchContext {
        return null;
    }

    @Override
    public SearchContext searchType(SearchType searchType) {
        return null;
    }

    @Override
    public SearchShardTarget shardTarget() {
        return null;
@ -1,80 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.test.junit.rule;

import org.elasticsearch.common.logging.ESLogger;
import org.junit.rules.TestRule;
import org.junit.runner.Description;
import org.junit.runners.model.Statement;

/**
 * A helper rule to catch all BindTransportExceptions
 * and rerun the test for a configured number of times
 *
 * Note: Be aware, that when a test is repeated, the @After and @Before
 * annotated methods are not run a second time
 *
 */
public class RepeatOnExceptionRule implements TestRule {

    private ESLogger logger;
    private int retryCount;
    private Class expectedException;

    /**
     *
     * @param logger the es logger from the test class
     * @param retryCount number of amounts to try a single test before failing
     * @param expectedException The exception class you want to catch
     *
     */
    public RepeatOnExceptionRule(ESLogger logger, int retryCount, Class expectedException) {
        this.logger = logger;
        this.retryCount = retryCount;
        this.expectedException = expectedException;
    }

    @Override
    public Statement apply(final Statement base, Description description) {

        return new Statement() {
            @Override
            public void evaluate() throws Throwable {
                Throwable caughtThrowable = null;

                for (int i = 0; i < retryCount; i++) {
                    try {
                        base.evaluate();
                        return;
                    } catch (Throwable t) {
                        if (t.getClass().equals(expectedException)) {
                            caughtThrowable = t;
                            logger.info("Exception [{}] occurred, rerunning the test after [{}] failures", t, t.getClass().getSimpleName(), i+1);
                        } else {
                            throw t;
                        }
                    }
                }
                logger.error("Giving up after [{}] failures... marking test as failed", retryCount);
                throw caughtThrowable;
            }
        };

    }
}