Merge branch 'master' into refactor_query_shard_context

Simon Willnauer 2015-11-03 17:10:13 +01:00
commit 759948f6c1
74 changed files with 1256 additions and 837 deletions

.gitignore
View File

@@ -8,6 +8,7 @@ work/
logs/
.DS_Store
build/
generated-resources/
**/.local*
docs/html/
docs/build.log

View File

@@ -1,18 +0,0 @@
eclipse.preferences.version=1
org.eclipse.jdt.core.compiler.annotation.inheritNullAnnotations=enabled
org.eclipse.jdt.core.compiler.annotation.missingNonNullByDefaultAnnotation=ignore
org.eclipse.jdt.core.compiler.annotation.nullable=org.elasticsearch.common.Nullable
org.eclipse.jdt.core.compiler.annotation.nullanalysis=enabled
org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.7
org.eclipse.jdt.core.compiler.compliance=1.7
org.eclipse.jdt.core.compiler.problem.forbiddenReference=warning
org.eclipse.jdt.core.compiler.problem.nonnullParameterAnnotationDropped=warning
org.eclipse.jdt.core.compiler.problem.nullAnnotationInferenceConflict=warning
org.eclipse.jdt.core.compiler.problem.nullReference=warning
org.eclipse.jdt.core.compiler.problem.nullSpecViolation=warning
org.eclipse.jdt.core.compiler.problem.nullUncheckedConversion=warning
org.eclipse.jdt.core.compiler.problem.potentialNullReference=warning
org.eclipse.jdt.core.compiler.source=1.7
org.eclipse.jdt.core.formatter.lineSplit=140
org.eclipse.jdt.core.formatter.tabulation.char=space
org.eclipse.jdt.core.formatter.tabulation.size=4

View File

@@ -153,7 +153,7 @@ allprojects {
apply plugin: 'idea'
}
if (hasProperty('projectsPrefix') == false) {
if (projectsPrefix.isEmpty()) {
idea {
project {
languageLevel = sourceCompatibility
@@ -178,8 +178,19 @@ allprojects {
}
}
}
task cleanEclipseSettings(type: Delete) {
delete '.settings'
}
task copyEclipseSettings(type: Copy) {
// TODO: "package this up" for external builds
from new File(project.rootDir, 'buildSrc/src/main/resources/eclipse.settings')
into '.settings'
}
// otherwise .settings is not nuked entirely
tasks.cleanEclipse.dependsOn(cleanEclipseSettings)
// otherwise the eclipse merging is *super confusing*
tasks.eclipse.dependsOn(cleanEclipse)
tasks.eclipse.dependsOn(copyEclipseSettings)
}

View File

@@ -84,9 +84,11 @@ class PluginBuildPlugin extends BuildPlugin {
static Task configureBundleTask(Project project) {
PluginPropertiesTask buildProperties = project.tasks.create(name: 'pluginProperties', type: PluginPropertiesTask)
File pluginMetadata = project.file("src/main/plugin-metadata")
project.processTestResources {
from buildProperties
from pluginMetadata
project.sourceSets.test {
output.dir(buildProperties.generatedResourcesDir, builtBy: 'pluginProperties')
resources {
srcDir pluginMetadata
}
}
Task bundle = project.tasks.create(name: 'bundlePlugin', type: Zip, dependsOn: [project.jar, buildProperties])
bundle.configure {

View File

@@ -19,77 +19,66 @@
package org.elasticsearch.gradle.plugin
import org.elasticsearch.gradle.ElasticsearchProperties
import org.gradle.api.DefaultTask
import org.gradle.api.InvalidUserDataException
import org.gradle.api.tasks.OutputFile
import org.gradle.api.tasks.TaskAction
import org.gradle.api.Task
import org.gradle.api.tasks.Copy
/**
* Creates a plugin descriptor.
*
* TODO: copy the example properties file to plugin documentation
*/
class PluginPropertiesTask extends DefaultTask {
class PluginPropertiesTask extends Copy {
PluginPropertiesExtension extension
Map<String, String> properties = new HashMap<>()
File generatedResourcesDir = new File(project.projectDir, 'generated-resources')
PluginPropertiesTask() {
File templateFile = new File(project.buildDir, 'templates/plugin-descriptor.properties')
Task copyPluginPropertiesTemplate = project.tasks.create('copyPluginPropertiesTemplate') {
doLast {
InputStream resourceTemplate = PluginPropertiesTask.getResourceAsStream('/plugin-descriptor.properties')
templateFile.parentFile.mkdirs()
templateFile.setText(resourceTemplate.getText('UTF-8'), 'UTF-8')
}
}
dependsOn(copyPluginPropertiesTemplate)
extension = project.extensions.create('esplugin', PluginPropertiesExtension, project)
project.clean {
delete generatedResourcesDir
}
project.afterEvaluate {
// check required properties are set
if (extension.description == null) {
throw new InvalidUserDataException('description is a required setting for esplugin')
}
if (extension.jvm && extension.classname == null) {
throw new InvalidUserDataException('classname is a required setting for esplugin with jvm=true')
}
if (extension.jvm) {
dependsOn(project.classes) // so we can check for the classname
}
fillProperties()
configure {
inputs.properties(properties)
doFirst {
if (extension.jvm && extension.isolated == false) {
String warning = "WARNING: Disabling plugin isolation in ${project.name} is deprecated and will be removed in the future"
logger.warn("${'=' * warning.length()}\n${warning}\n${'=' * warning.length()}")
}
}
// configure property substitution
from templateFile
into generatedResourcesDir
expand(generateSubstitutions())
}
}
}
@OutputFile
File propertiesFile = new File(project.buildDir, "plugin" + File.separator + "plugin-descriptor.properties")
void fillProperties() {
// TODO: need to copy the templated plugin-descriptor with a dependent task, since copy requires a file (not uri)
properties = [
Map generateSubstitutions() {
return [
'name': extension.name,
'description': extension.description,
'version': extension.version,
'elasticsearch.version': ElasticsearchProperties.version,
'elasticsearchVersion': ElasticsearchProperties.version,
'javaVersion': project.targetCompatibility as String,
'jvm': extension.jvm as String,
'site': extension.site as String
'site': extension.site as String,
'isolated': extension.isolated as String,
'classname': extension.jvm ? extension.classname : 'NA'
]
if (extension.jvm) {
properties['classname'] = extension.classname
properties['isolated'] = extension.isolated as String
properties['java.version'] = project.targetCompatibility as String
}
}
@TaskAction
void buildProperties() {
if (extension.jvm) {
File classesDir = project.sourceSets.main.output.classesDir
File classFile = new File(classesDir, extension.classname.replace('.', File.separator) + '.class')
if (classFile.exists() == false) {
throw new InvalidUserDataException('classname ' + extension.classname + ' does not exist')
}
if (extension.isolated == false) {
logger.warn('Disabling isolation is deprecated and will be removed in the future')
}
}
Properties props = new Properties()
for (Map.Entry<String, String> prop : properties) {
props.put(prop.getKey(), prop.getValue())
}
props.store(propertiesFile.newWriter(), null)
}
}

View File

@@ -1,6 +1,6 @@
eclipse.preferences.version=1
encoding//src/main/java=UTF-8
encoding//src/main/resources=UTF-8
encoding//src/test/java=UTF-8
encoding//src/test/resources=UTF-8
encoding/<project>=UTF-8
encoding/rest-api-spec=UTF-8

View File

@@ -0,0 +1,22 @@
eclipse.preferences.version=1
# previous configuration from maven build
# this is merged with gradle's generated properties during 'gradle eclipse'
# NOTE: null pointer analysis etc is not enabled currently, it seems very unstable
# (e.g. crashing eclipse etc)
# org.eclipse.jdt.core.compiler.annotation.inheritNullAnnotations=enabled
# org.eclipse.jdt.core.compiler.annotation.missingNonNullByDefaultAnnotation=ignore
# org.eclipse.jdt.core.compiler.annotation.nullable=org.elasticsearch.common.Nullable
# org.eclipse.jdt.core.compiler.annotation.nullanalysis=enabled
# org.eclipse.jdt.core.compiler.problem.nonnullParameterAnnotationDropped=warning
# org.eclipse.jdt.core.compiler.problem.nullAnnotationInferenceConflict=warning
# org.eclipse.jdt.core.compiler.problem.nullReference=warning
# org.eclipse.jdt.core.compiler.problem.nullSpecViolation=warning
# org.eclipse.jdt.core.compiler.problem.nullUncheckedConversion=warning
# org.eclipse.jdt.core.compiler.problem.potentialNullReference=warning
org.eclipse.jdt.core.compiler.problem.forbiddenReference=warning
org.eclipse.jdt.core.formatter.lineSplit=140
org.eclipse.jdt.core.formatter.tabulation.char=space
org.eclipse.jdt.core.formatter.tabulation.size=4

View File

@@ -31,19 +31,19 @@
### mandatory elements for all plugins:
#
# 'description': simple summary of the plugin
description=${project.description}
description=${description}
#
# 'version': plugin's version
version=${project.version}
version=${version}
#
# 'name': the plugin name
name=${elasticsearch.plugin.name}
name=${name}
### mandatory elements for site plugins:
#
# 'site': set to true to indicate contents of the _site/
# directory in the root of the plugin should be served.
site=${elasticsearch.plugin.site}
site=${site}
#
### mandatory elements for jvm plugins :
#
@@ -52,19 +52,19 @@ site=${elasticsearch.plugin.site}
# Note that only jar files in the root directory are
# added to the classpath for the plugin! If you need
# other resources, package them into a resources jar.
jvm=${elasticsearch.plugin.jvm}
jvm=${jvm}
#
# 'classname': the name of the class to load, fully-qualified.
classname=${elasticsearch.plugin.classname}
classname=${classname}
#
# 'java.version' version of java the code is built against
# use the system property java.specification.version
# version string must be a sequence of nonnegative decimal integers
# separated by "."'s and may have leading zeros
java.version=${java.target.version}
java.version=${javaVersion}
#
# 'elasticsearch.version' version of elasticsearch compiled against
elasticsearch.version=${elasticsearch.version}
elasticsearch.version=${elasticsearchVersion}
#
### deprecated elements for jvm plugins :
#
@@ -72,5 +72,5 @@ elasticsearch.version=${elasticsearch.version}
# passing false is deprecated, and only intended to support plugins
# that have hard dependencies against each other. If this is
# not specified, then the plugin is isolated by default.
isolated=${elasticsearch.plugin.isolated}
isolated=${isolated}
#

View File

@@ -38,13 +38,11 @@ public class SearchPhaseExecutionException extends ElasticsearchException {
private final ShardSearchFailure[] shardFailures;
public SearchPhaseExecutionException(String phaseName, String msg, ShardSearchFailure[] shardFailures) {
super(msg);
this.phaseName = phaseName;
this.shardFailures = shardFailures;
this(phaseName, msg, null, shardFailures);
}
public SearchPhaseExecutionException(String phaseName, String msg, Throwable cause, ShardSearchFailure[] shardFailures) {
super(msg, cause);
super(msg, deduplicateCause(cause, shardFailures));
this.phaseName = phaseName;
this.shardFailures = shardFailures;
}
@@ -63,12 +61,26 @@ public class SearchPhaseExecutionException extends ElasticsearchException {
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeOptionalString(phaseName);
out.writeVInt(shardFailures == null ? 0 : shardFailures.length);
if (shardFailures != null) {
out.writeVInt(shardFailures.length);
for (ShardSearchFailure failure : shardFailures) {
failure.writeTo(out);
}
}
private static final Throwable deduplicateCause(Throwable cause, ShardSearchFailure[] shardFailures) {
if (shardFailures == null) {
throw new IllegalArgumentException("shardSearchFailures must not be null");
}
// if the cause of this exception is also the cause of one of the shard failures we don't add it
// to prevent duplication in stack traces rendered to the REST layer
if (cause != null) {
for (ShardSearchFailure failure : shardFailures) {
if (failure.getCause() == cause) {
return null;
}
}
}
return cause;
}
@Override
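The deduplication above is also why the scroll actions in the next two files now pass ShardSearchFailure.EMPTY_ARRAY instead of null: deduplicateCause rejects a null failures array outright. A minimal sketch of the resulting behaviour, assuming ShardSearchFailure's Throwable constructor:

Throwable cause = new RuntimeException("shard went away");
ShardSearchFailure[] failures = { new ShardSearchFailure(cause) };
SearchPhaseExecutionException e =
        new SearchPhaseExecutionException("query", "all shards failed", cause, failures);
// e.getCause() is null: the cause is already reachable through the shard failure,
// so the REST layer does not render the same stack trace twice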

View File

@@ -114,7 +114,7 @@ public class TransportSearchScrollQueryAndFetchAction extends AbstractComponent
public void start() {
if (scrollId.getContext().length == 0) {
listener.onFailure(new SearchPhaseExecutionException("query", "no nodes to search on", null));
listener.onFailure(new SearchPhaseExecutionException("query", "no nodes to search on", ShardSearchFailure.EMPTY_ARRAY));
return;
}
@@ -175,7 +175,7 @@ public class TransportSearchScrollQueryAndFetchAction extends AbstractComponent
successfulOps.decrementAndGet();
if (counter.decrementAndGet() == 0) {
if (successfulOps.get() == 0) {
listener.onFailure(new SearchPhaseExecutionException("query_fetch", "all shards failed", buildShardFailures()));
listener.onFailure(new SearchPhaseExecutionException("query_fetch", "all shards failed", t, buildShardFailures()));
} else {
finishHim();
}

View File

@@ -123,7 +123,7 @@ public class TransportSearchScrollQueryThenFetchAction extends AbstractComponent
public void start() {
if (scrollId.getContext().length == 0) {
listener.onFailure(new SearchPhaseExecutionException("query", "no nodes to search on", null));
listener.onFailure(new SearchPhaseExecutionException("query", "no nodes to search on", ShardSearchFailure.EMPTY_ARRAY));
return;
}
final AtomicInteger counter = new AtomicInteger(scrollId.getContext().length);
@@ -143,7 +143,7 @@ public class TransportSearchScrollQueryThenFetchAction extends AbstractComponent
try {
executeFetchPhase();
} catch (Throwable e) {
listener.onFailure(new SearchPhaseExecutionException("query", "Fetch failed", e, null));
listener.onFailure(new SearchPhaseExecutionException("query", "Fetch failed", e, ShardSearchFailure.EMPTY_ARRAY));
return;
}
}
@@ -181,12 +181,12 @@ public class TransportSearchScrollQueryThenFetchAction extends AbstractComponent
successfulOps.decrementAndGet();
if (counter.decrementAndGet() == 0) {
if (successfulOps.get() == 0) {
listener.onFailure(new SearchPhaseExecutionException("query", "all shards failed", buildShardFailures()));
listener.onFailure(new SearchPhaseExecutionException("query", "all shards failed", t, buildShardFailures()));
} else {
try {
executeFetchPhase();
} catch (Throwable e) {
listener.onFailure(new SearchPhaseExecutionException("query", "Fetch failed", e, null));
listener.onFailure(new SearchPhaseExecutionException("query", "Fetch failed", e, ShardSearchFailure.EMPTY_ARRAY));
}
}
}

View File

@@ -220,17 +220,19 @@ public abstract class TransportSearchTypeAction extends TransportAction<SearchRe
logger.trace("{}: Failed to execute [{}]", t, shard, request);
}
}
final ShardSearchFailure[] shardSearchFailures = buildShardFailures();
if (successfulOps.get() == 0) {
if (logger.isDebugEnabled()) {
logger.debug("All shards failed for phase: [{}]", t, firstPhaseName());
}
// no successful ops, raise an exception
raiseEarlyFailure(new SearchPhaseExecutionException(firstPhaseName(), "all shards failed", buildShardFailures()));
raiseEarlyFailure(new SearchPhaseExecutionException(firstPhaseName(), "all shards failed", t, shardSearchFailures));
} else {
try {
innerMoveToSecondPhase();
} catch (Throwable e) {
raiseEarlyFailure(new ReduceSearchPhaseException(firstPhaseName(), "", e, buildShardFailures()));
raiseEarlyFailure(new ReduceSearchPhaseException(firstPhaseName(), "", e, shardSearchFailures));
}
}
} else {

View File

@@ -19,7 +19,6 @@
package org.elasticsearch.action.termvectors;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.DocumentRequest;
@@ -651,7 +650,7 @@ public class TermVectorsRequest extends SingleShardRequest<TermVectorsRequest> i
if (e.getValue() instanceof String) {
mapStrStr.put(e.getKey(), (String) e.getValue());
} else {
throw new ElasticsearchException("expecting the analyzer at [{}] to be a String, but found [{}] instead", e.getKey(), e.getValue().getClass());
throw new ElasticsearchParseException("expecting the analyzer at [{}] to be a String, but found [{}] instead", e.getKey(), e.getValue().getClass());
}
}
return mapStrStr;

View File

@@ -177,7 +177,7 @@ public class ClusterModule extends AbstractModule {
registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT, Validator.TIME_NON_NEGATIVE);
registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT, Validator.TIME_NON_NEGATIVE);
registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_MAX_SIZE_PER_SEC, Validator.BYTES_SIZE);
registerClusterDynamicSetting(ThreadPool.THREADPOOL_GROUP + "*", Validator.EMPTY);
registerClusterDynamicSetting(ThreadPool.THREADPOOL_GROUP + "*", ThreadPool.THREAD_POOL_TYPE_SETTINGS_VALIDATOR);
registerClusterDynamicSetting(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, Validator.INTEGER);
registerClusterDynamicSetting(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, Validator.INTEGER);
registerClusterDynamicSetting(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, Validator.EMPTY);

View File

@@ -189,7 +189,7 @@ public class HttpServer extends AbstractLifecycleComponent<HttpServer> {
sitePath = null;
// If a trailing / is missing, we redirect to the right page #2654
String redirectUrl = request.rawPath() + "/";
BytesRestResponse restResponse = new BytesRestResponse(RestStatus.MOVED_PERMANENTLY, "text/html", "<head><meta http-equiv=\"refresh\" content=\"0; URL=" + redirectUrl + "></head>");
BytesRestResponse restResponse = new BytesRestResponse(RestStatus.MOVED_PERMANENTLY, "text/html", "<head><meta http-equiv=\"refresh\" content=\"0; URL=" + redirectUrl + "\"></head>");
restResponse.addHeader("Location", redirectUrl);
channel.sendResponse(restResponse);
return;

View File

@@ -355,7 +355,7 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
try {
clone = (ObjectMapper) super.clone();
} catch (CloneNotSupportedException e) {
throw new RuntimeException();
throw new RuntimeException(e);
}
return clone;
}

View File

@@ -39,7 +39,7 @@ public class CommonTermsQueryParser implements QueryParser<CommonTermsQueryBuild
XContentParser parser = parseContext.parser();
XContentParser.Token token = parser.nextToken();
if (token != XContentParser.Token.FIELD_NAME) {
throw new ParsingException(parser.getTokenLocation(), "[common] query malformed, no field");
throw new ParsingException(parser.getTokenLocation(), "[" + CommonTermsQueryBuilder.NAME + "] query malformed, no field");
}
String fieldName = parser.currentName();
Object text = null;
@@ -70,13 +70,16 @@
} else if ("high_freq".equals(innerFieldName) || "highFreq".equals(innerFieldName)) {
highFreqMinimumShouldMatch = parser.text();
} else {
throw new ParsingException(parser.getTokenLocation(), "[common] query does not support [" + innerFieldName
throw new ParsingException(parser.getTokenLocation(), "[" + CommonTermsQueryBuilder.NAME + "] query does not support [" + innerFieldName
+ "] for [" + currentFieldName + "]");
}
} else {
throw new ParsingException(parser.getTokenLocation(), "[" + CommonTermsQueryBuilder.NAME + "] unexpected token type [" + token
+ "] after [" + innerFieldName + "]");
}
}
} else {
throw new ParsingException(parser.getTokenLocation(), "[common] query does not support [" + currentFieldName + "]");
throw new ParsingException(parser.getTokenLocation(), "[" + CommonTermsQueryBuilder.NAME + "] query does not support [" + currentFieldName + "]");
}
} else if (token.isValue()) {
if ("query".equals(currentFieldName)) {
@@ -98,7 +101,7 @@ public class CommonTermsQueryParser implements QueryParser<CommonTermsQueryBuild
} else if ("_name".equals(currentFieldName)) {
queryName = parser.text();
} else {
throw new ParsingException(parser.getTokenLocation(), "[common] query does not support [" + currentFieldName + "]");
throw new ParsingException(parser.getTokenLocation(), "[" + CommonTermsQueryBuilder.NAME + "] query does not support [" + currentFieldName + "]");
}
}
}
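This is the first of a series of parsers in this commit that gain an explicit else branch rejecting unexpected token types instead of silently skipping them; the same guard reappears below in the exists, geo_shape, ids, match_all, match_none, match, missing, multi_match, query_string, simple_query_string and terms parsers. A minimal sketch of the shared pattern (field handling elided, the [example] query name hypothetical):

XContentParser.Token token;
String currentFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
    if (token == XContentParser.Token.FIELD_NAME) {
        currentFieldName = parser.currentName();
    } else if (token.isValue()) {
        // known fields are handled here; unknown ones already threw "query does not support [...]"
    } else {
        // new in this commit: any other token type is an error rather than being ignored
        throw new ParsingException(parser.getTokenLocation(),
                "[example] unknown token [" + token + "] after [" + currentFieldName + "]");
    }
}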

View File

@@ -55,13 +55,15 @@ public class ExistsQueryParser implements QueryParser<ExistsQueryBuilder> {
} else if ("boost".equals(currentFieldName)) {
boost = parser.floatValue();
} else {
throw new ParsingException(parser.getTokenLocation(), "[exists] query does not support [" + currentFieldName + "]");
throw new ParsingException(parser.getTokenLocation(), "[" + ExistsQueryBuilder.NAME + "] query does not support [" + currentFieldName + "]");
}
} else {
throw new ParsingException(parser.getTokenLocation(), "[" + ExistsQueryBuilder.NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]");
}
}
if (fieldPattern == null) {
throw new ParsingException(parser.getTokenLocation(), "exists must be provided with a [field]");
throw new ParsingException(parser.getTokenLocation(), "[" + ExistsQueryBuilder.NAME + "] must be provided with a [field]");
}
ExistsQueryBuilder builder = new ExistsQueryBuilder(fieldPattern);

View File

@@ -70,8 +70,10 @@ public class GeoShapeQueryParser implements QueryParser<GeoShapeQueryBuilder> {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if (fieldName != null) {
throw new ParsingException(parser.getTokenLocation(), "[" + GeoShapeQueryBuilder.NAME + "] point specified twice. [" + currentFieldName + "]");
}
fieldName = currentFieldName;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
@@ -104,10 +106,12 @@ public class GeoShapeQueryParser implements QueryParser<GeoShapeQueryBuilder> {
} else if (parseContext.parseFieldMatcher().match(currentFieldName, SHAPE_PATH_FIELD)) {
shapePath = parser.text();
}
} else {
throw new ParsingException(parser.getTokenLocation(), "[" + GeoShapeQueryBuilder.NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]");
}
}
} else {
throw new ParsingException(parser.getTokenLocation(), "[geo_shape] query does not support [" + currentFieldName + "]");
throw new ParsingException(parser.getTokenLocation(), "[" + GeoShapeQueryBuilder.NAME + "] query does not support [" + currentFieldName + "]");
}
}
}
@@ -117,7 +121,7 @@ public class GeoShapeQueryParser implements QueryParser<GeoShapeQueryBuilder> {
} else if (parseContext.parseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {
queryName = parser.text();
} else {
throw new ParsingException(parser.getTokenLocation(), "[geo_shape] query does not support [" + currentFieldName + "]");
throw new ParsingException(parser.getTokenLocation(), "[" + GeoShapeQueryBuilder.NAME + "] query does not support [" + currentFieldName + "]");
}
}
}

View File

@@ -79,7 +79,7 @@ public class IdsQueryParser implements QueryParser<IdsQueryBuilder> {
types.add(value);
}
} else {
throw new ParsingException(parser.getTokenLocation(), "[ids] query does not support [" + currentFieldName + "]");
throw new ParsingException(parser.getTokenLocation(), "[" + IdsQueryBuilder.NAME + "] query does not support [" + currentFieldName + "]");
}
} else if (token.isValue()) {
if ("type".equals(currentFieldName) || "_type".equals(currentFieldName)) {
@@ -89,12 +89,14 @@ public class IdsQueryParser implements QueryParser<IdsQueryBuilder> {
} else if ("_name".equals(currentFieldName)) {
queryName = parser.text();
} else {
throw new ParsingException(parser.getTokenLocation(), "[ids] query does not support [" + currentFieldName + "]");
throw new ParsingException(parser.getTokenLocation(), "[" + IdsQueryBuilder.NAME + "] query does not support [" + currentFieldName + "]");
}
} else {
throw new ParsingException(parser.getTokenLocation(), "[" + IdsQueryBuilder.NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]");
}
}
if (!idsProvided) {
throw new ParsingException(parser.getTokenLocation(), "[ids] query, no ids values provided");
throw new ParsingException(parser.getTokenLocation(), "[" + IdsQueryBuilder.NAME + "] query, no ids values provided");
}
IdsQueryBuilder query = new IdsQueryBuilder(types.toArray(new String[types.size()]));

View File

@@ -52,8 +52,10 @@ public class MatchAllQueryParser implements QueryParser<MatchAllQueryBuilder> {
} else if ("boost".equals(currentFieldName)) {
boost = parser.floatValue();
} else {
throw new ParsingException(parser.getTokenLocation(), "[match_all] query does not support [" + currentFieldName + "]");
throw new ParsingException(parser.getTokenLocation(), "[" + MatchAllQueryBuilder.NAME + "] query does not support [" + currentFieldName + "]");
}
} else {
throw new ParsingException(parser.getTokenLocation(), "[" + MatchAllQueryBuilder.NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]");
}
}
MatchAllQueryBuilder queryBuilder = new MatchAllQueryBuilder();

View File

@@ -51,6 +51,8 @@ public class MatchNoneQueryParser implements QueryParser<MatchNoneQueryBuilder>
} else {
throw new ParsingException(parser.getTokenLocation(), "["+MatchNoneQueryBuilder.NAME+"] query does not support [" + currentFieldName + "]");
}
} else {
throw new ParsingException(parser.getTokenLocation(), "[" + MatchNoneQueryBuilder.NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]");
}
}

View File

@@ -55,7 +55,7 @@ public class MatchQueryParser implements QueryParser<MatchQueryBuilder> {
XContentParser.Token token = parser.nextToken();
if (token != XContentParser.Token.FIELD_NAME) {
throw new ParsingException(parser.getTokenLocation(), "[match] query malformed, no field");
throw new ParsingException(parser.getTokenLocation(), "[" + MatchQueryBuilder.NAME + "] query malformed, no field");
}
String fieldName = parser.currentName();
@@ -93,7 +93,7 @@ public class MatchQueryParser implements QueryParser<MatchQueryBuilder> {
} else if ("phrase_prefix".equals(tStr) || "phrasePrefix".equals(currentFieldName)) {
type = MatchQuery.Type.PHRASE_PREFIX;
} else {
throw new ParsingException(parser.getTokenLocation(), "[match] query does not support type " + tStr);
throw new ParsingException(parser.getTokenLocation(), "[" + MatchQueryBuilder.NAME + "] query does not support type " + tStr);
}
} else if ("analyzer".equals(currentFieldName)) {
analyzer = parser.text();
@@ -131,8 +131,10 @@ public class MatchQueryParser implements QueryParser<MatchQueryBuilder> {
} else if ("_name".equals(currentFieldName)) {
queryName = parser.text();
} else {
throw new ParsingException(parser.getTokenLocation(), "[match] query does not support [" + currentFieldName + "]");
throw new ParsingException(parser.getTokenLocation(), "[" + MatchQueryBuilder.NAME + "] query does not support [" + currentFieldName + "]");
}
} else {
throw new ParsingException(parser.getTokenLocation(), "[" + MatchQueryBuilder.NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]");
}
}
parser.nextToken();

View File

@@ -61,8 +61,10 @@ public class MissingQueryParser implements QueryParser<MissingQueryBuilder> {
} else if ("boost".equals(currentFieldName)) {
boost = parser.floatValue();
} else {
throw new ParsingException(parser.getTokenLocation(), "[missing] query does not support [" + currentFieldName + "]");
throw new ParsingException(parser.getTokenLocation(), "[" + MissingQueryBuilder.NAME + "] query does not support [" + currentFieldName + "]");
}
} else {
throw new ParsingException(parser.getTokenLocation(), "[" + MissingQueryBuilder.NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]");
}
}

View File

@@ -122,8 +122,10 @@ public class MultiMatchQueryParser implements QueryParser<MultiMatchQueryBuilder
} else if ("_name".equals(currentFieldName)) {
queryName = parser.text();
} else {
throw new ParsingException(parser.getTokenLocation(), "[match] query does not support [" + currentFieldName + "]");
throw new ParsingException(parser.getTokenLocation(), "[" + MultiMatchQueryBuilder.NAME + "] query does not support [" + currentFieldName + "]");
}
} else {
throw new ParsingException(parser.getTokenLocation(), "[" + MultiMatchQueryBuilder.NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]");
}
}

View File

@@ -22,7 +22,6 @@ package org.elasticsearch.index.query;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
@@ -76,17 +75,6 @@ public class QueryParseContext {
String fieldName = parser.currentName();
if ("query".equals(fieldName)) {
queryBuilder = parseInnerQueryBuilder();
} else if ("query_binary".equals(fieldName) || "queryBinary".equals(fieldName)) {
byte[] querySource = parser.binaryValue();
XContentParser qSourceParser = XContentFactory.xContent(querySource).createParser(querySource);
QueryParseContext queryParseContext = new QueryParseContext(indicesQueriesRegistry);
queryParseContext.reset(qSourceParser);
try {
queryParseContext.parseFieldMatcher(parseFieldMatcher);
queryBuilder = queryParseContext.parseInnerQueryBuilder();
} finally {
queryParseContext.reset(null);
}
} else {
throw new ParsingException(parser.getTokenLocation(), "request does not support [" + parser.currentName() + "]");
}
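With the query_binary branch gone here (together with the QueryBinaryParseElement and FilterBinaryParseElement deletions and the QueryPhase registration cleanup further down), a base64-wrapped query is no longer unwrapped and re-parsed; it now falls through to the else branch above. A sketch, with the base64 payload spelled out for illustration:

// request body: {"query_binary": "eyJtYXRjaF9hbGwiOnt9fQ=="}   (base64 of {"match_all":{}})
// before: decoded and re-parsed through a nested QueryParseContext
// after:  ParsingException: "request does not support [query_binary]"
// clients should send the query as plain JSON under "query" instead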

View File

@@ -96,7 +96,7 @@ public class QueryStringQueryParser implements QueryParser {
fieldsAndWeights.put(fField, fBoost);
}
} else {
throw new ParsingException(parser.getTokenLocation(), "[query_string] query does not support [" + currentFieldName + "]");
throw new ParsingException(parser.getTokenLocation(), "[" + QueryStringQueryBuilder.NAME + "] query does not support [" + currentFieldName + "]");
}
} else if (token.isValue()) {
if ("query".equals(currentFieldName)) {
@@ -154,17 +154,19 @@ public class QueryStringQueryParser implements QueryParser {
try {
timeZone = parser.text();
} catch (IllegalArgumentException e) {
throw new ParsingException(parser.getTokenLocation(), "[query_string] time_zone [" + parser.text() + "] is unknown");
throw new ParsingException(parser.getTokenLocation(), "[" + QueryStringQueryBuilder.NAME + "] time_zone [" + parser.text() + "] is unknown");
}
} else if ("_name".equals(currentFieldName)) {
queryName = parser.text();
} else {
throw new ParsingException(parser.getTokenLocation(), "[query_string] query does not support [" + currentFieldName + "]");
throw new ParsingException(parser.getTokenLocation(), "[" + QueryStringQueryBuilder.NAME + "] query does not support [" + currentFieldName + "]");
}
} else {
throw new ParsingException(parser.getTokenLocation(), "[" + QueryStringQueryBuilder.NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]");
}
}
if (queryString == null) {
throw new ParsingException(parser.getTokenLocation(), "query_string must be provided with a [query]");
throw new ParsingException(parser.getTokenLocation(), "[" + QueryStringQueryBuilder.NAME + "] must be provided with a [query]");
}
QueryStringQueryBuilder queryStringQuery = new QueryStringQueryBuilder(queryString);

View File

@@ -70,7 +70,7 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp
rethrowUnlessLenient(e);
}
}
return simplify(bq.build());
return super.simplify(bq.build());
}
/**
@@ -93,7 +93,7 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp
rethrowUnlessLenient(e);
}
}
return simplify(bq.build());
return super.simplify(bq.build());
}
@Override
@@ -111,7 +111,7 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp
rethrowUnlessLenient(e);
}
}
return simplify(bq.build());
return super.simplify(bq.build());
}
/**
@@ -140,19 +140,7 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp
return rethrowUnlessLenient(e);
}
}
return simplify(bq.build());
}
/**
* Override of lucenes SimpleQueryParser that doesn't simplify for the 1-clause case.
*/
@Override
protected Query simplify(BooleanQuery bq) {
if (bq.clauses().isEmpty()) {
return null;
} else {
return bq;
}
return super.simplify(bq.build());
}
/**

View File

@@ -20,8 +20,6 @@
package org.elasticsearch.index.query;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
import org.elasticsearch.common.Strings;
@@ -287,20 +285,8 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQuerySt
sqp.setDefaultOperator(defaultOperator.toBooleanClauseOccur());
Query query = sqp.parse(queryText);
if (query instanceof BooleanQuery) {
BooleanQuery booleanQuery = (BooleanQuery) query;
if (booleanQuery.clauses().size() > 1
&& ((booleanQuery.clauses().iterator().next().getQuery() instanceof BooleanQuery) == false)) {
// special case for one term query and more than one field: (f1:t1 f2:t1 f3:t1)
// we need to wrap this in additional BooleanQuery so minimum_should_match is applied correctly
BooleanQuery.Builder builder = new BooleanQuery.Builder();
builder.add(new BooleanClause(booleanQuery, Occur.SHOULD));
booleanQuery = builder.build();
}
if (minimumShouldMatch != null) {
booleanQuery = Queries.applyMinimumShouldMatch(booleanQuery, minimumShouldMatch);
}
query = booleanQuery;
if (minimumShouldMatch != null && query instanceof BooleanQuery) {
query = Queries.applyMinimumShouldMatch((BooleanQuery) query, minimumShouldMatch);
}
return query;
}
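Both halves of this change lean on Lucene's own SimpleQueryParser.simplify, which at the Lucene version this branch builds against already unwraps a single-clause boolean query, making both the custom override and the single-term re-wrapping special case redundant. Roughly (a paraphrase of the Lucene behaviour, not code from this commit):

static Query simplify(BooleanQuery bq) {
    if (bq.clauses().isEmpty()) {
        return null;                                      // nothing matched any field
    } else if (bq.clauses().size() == 1) {
        return bq.clauses().iterator().next().getQuery(); // unwrap the lone clause
    } else {
        return bq;                                        // keep multi-clause queries intact
    }
}

Since only a BooleanQuery that survives simplification can carry minimum_should_match, the instanceof check above is all that remains to be done.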

View File

@@ -146,6 +146,8 @@ public class SimpleQueryStringParser implements QueryParser<SimpleQueryStringBui
} else {
throw new ParsingException(parser.getTokenLocation(), "[" + SimpleQueryStringBuilder.NAME + "] unsupported field [" + parser.currentName() + "]");
}
} else {
throw new ParsingException(parser.getTokenLocation(), "[" + SimpleQueryStringBuilder.NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]");
}
}

View File

@@ -62,11 +62,15 @@ public class TermsQueryParser implements QueryParser {
// skip
} else if (token == XContentParser.Token.START_ARRAY) {
if (fieldName != null) {
throw new ParsingException(parser.getTokenLocation(), "[terms] query does not support multiple fields");
throw new ParsingException(parser.getTokenLocation(), "[" + TermsQueryBuilder.NAME + "] query does not support multiple fields");
}
fieldName = currentFieldName;
values = parseValues(parser);
} else if (token == XContentParser.Token.START_OBJECT) {
if (fieldName != null) {
throw new ParsingException(parser.getTokenLocation(), "[" + TermsQueryBuilder.NAME + "] query does not support more than one field. "
+ "Already got: [" + fieldName + "] but also found [" + currentFieldName +"]");
}
fieldName = currentFieldName;
termsLookup = TermsLookup.parseTermsLookup(parser);
} else if (token.isValue()) {
@@ -75,13 +79,15 @@ public class TermsQueryParser implements QueryParser {
} else if ("_name".equals(currentFieldName)) {
queryName = parser.text();
} else {
throw new ParsingException(parser.getTokenLocation(), "[terms] query does not support [" + currentFieldName + "]");
throw new ParsingException(parser.getTokenLocation(), "[" + TermsQueryBuilder.NAME + "] query does not support [" + currentFieldName + "]");
}
} else {
throw new ParsingException(parser.getTokenLocation(), "[" + TermsQueryBuilder.NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]");
}
}
if (fieldName == null) {
throw new ParsingException(parser.getTokenLocation(), "terms query requires a field name, followed by array of terms or a document lookup specification");
throw new ParsingException(parser.getTokenLocation(), "[" + TermsQueryBuilder.NAME + "] query requires a field name, followed by array of terms or a document lookup specification");
}
return new TermsQueryBuilder(fieldName, values, termsLookup)
.boost(boost)

View File

@@ -26,6 +26,7 @@ import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.TermsQueryBuilder;
import java.io.IOException;
import java.util.Objects;
@@ -49,13 +50,13 @@ public class TermsLookup implements Writeable<TermsLookup>, ToXContent {
public TermsLookup(String index, String type, String id, String path) {
if (id == null) {
throw new IllegalArgumentException("[terms] query lookup element requires specifying the id.");
throw new IllegalArgumentException("[" + TermsQueryBuilder.NAME + "] query lookup element requires specifying the id.");
}
if (type == null) {
throw new IllegalArgumentException("[terms] query lookup element requires specifying the type.");
throw new IllegalArgumentException("[" + TermsQueryBuilder.NAME + "] query lookup element requires specifying the type.");
}
if (path == null) {
throw new IllegalArgumentException("[terms] query lookup element requires specifying the path.");
throw new IllegalArgumentException("[" + TermsQueryBuilder.NAME + "] query lookup element requires specifying the path.");
}
this.index = index;
this.type = type;
@@ -122,9 +123,11 @@ public class TermsLookup implements Writeable<TermsLookup>, ToXContent {
path = parser.text();
break;
default:
throw new ParsingException(parser.getTokenLocation(), "[terms] query does not support [" + currentFieldName
throw new ParsingException(parser.getTokenLocation(), "[" + TermsQueryBuilder.NAME + "] query does not support [" + currentFieldName
+ "] within lookup element");
}
} else {
throw new ParsingException(parser.getTokenLocation(), "[" + TermsQueryBuilder.NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]");
}
}
return new TermsLookup(index, type, id, path).routing(routing);

View File

@@ -288,7 +288,7 @@ public class RestThreadPoolAction extends AbstractCatAction {
}
}
table.addCell(poolInfo == null ? null : poolInfo.getType());
table.addCell(poolInfo == null ? null : poolInfo.getThreadPoolType().getType());
table.addCell(poolStats == null ? null : poolStats.getActive());
table.addCell(poolStats == null ? null : poolStats.getThreads());
table.addCell(poolStats == null ? null : poolStats.getQueue());

View File

@@ -1,42 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.query;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.search.SearchParseElement;
import org.elasticsearch.search.internal.SearchContext;
/**
*
*/
public class FilterBinaryParseElement implements SearchParseElement {
@Override
public void parse(XContentParser parser, SearchContext context) throws Exception {
byte[] filterSource = parser.binaryValue();
try (XContentParser fSourceParser = XContentFactory.xContent(filterSource).createParser(filterSource)) {
ParsedQuery filter = context.indexShard().getQueryShardContext().parseInnerFilter(fSourceParser);
if (filter != null) {
context.parsedPostFilter(filter);
}
}
}
}

View File

@@ -1,39 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.query;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.search.SearchParseElement;
import org.elasticsearch.search.internal.SearchContext;
/**
*
*/
public class QueryBinaryParseElement implements SearchParseElement {
@Override
public void parse(XContentParser parser, SearchContext context) throws Exception {
byte[] querySource = parser.binaryValue();
try (XContentParser qSourceParser = XContentFactory.xContent(querySource).createParser(querySource)) {
context.parsedQuery(context.indexShard().getQueryShardContext().parse(qSourceParser));
}
}
}

View File

@@ -90,12 +90,8 @@ public class QueryPhase implements SearchPhase {
parseElements.put("indices_boost", new IndicesBoostParseElement());
parseElements.put("indicesBoost", new IndicesBoostParseElement());
parseElements.put("query", new QueryParseElement());
parseElements.put("queryBinary", new QueryBinaryParseElement());
parseElements.put("query_binary", new QueryBinaryParseElement());
parseElements.put("post_filter", new PostFilterParseElement());
parseElements.put("postFilter", new PostFilterParseElement());
parseElements.put("filterBinary", new FilterBinaryParseElement());
parseElements.put("filter_binary", new FilterBinaryParseElement());
parseElements.put("sort", new SortParseElement());
parseElements.put("trackScores", new TrackScoresParseElement());
parseElements.put("track_scores", new TrackScoresParseElement());

View File

@@ -20,6 +20,8 @@
package org.elasticsearch.threadpool;
import org.apache.lucene.util.Counter;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.settings.Validator;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.io.stream.StreamInput;
@@ -39,22 +41,11 @@ import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.node.settings.NodeSettingsService;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.*;
import java.util.concurrent.*;
import java.util.function.Function;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
@@ -86,6 +77,101 @@ public class ThreadPool extends AbstractComponent {
public static final String FETCH_SHARD_STORE = "fetch_shard_store";
}
public enum ThreadPoolType {
CACHED("cached"),
DIRECT("direct"),
FIXED("fixed"),
SCALING("scaling");
private final String type;
public String getType() {
return type;
}
ThreadPoolType(String type) {
this.type = type;
}
private final static Map<String, ThreadPoolType> TYPE_MAP;
static {
Map<String, ThreadPoolType> typeMap = new HashMap<>();
for (ThreadPoolType threadPoolType : ThreadPoolType.values()) {
typeMap.put(threadPoolType.getType(), threadPoolType);
}
TYPE_MAP = Collections.unmodifiableMap(typeMap);
}
public static ThreadPoolType fromType(String type) {
ThreadPoolType threadPoolType = TYPE_MAP.get(type);
if (threadPoolType == null) {
throw new IllegalArgumentException("no ThreadPoolType for " + type);
}
return threadPoolType;
}
}
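// Illustrative (not part of this diff): fromType is the single choke point turning
// user-supplied type strings back into constants:
//   ThreadPoolType t = ThreadPoolType.fromType("fixed"); // t == ThreadPoolType.FIXED
//   ThreadPoolType.fromType("cachd"); // throws IllegalArgumentException("no ThreadPoolType for cachd")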
public static Map<String, ThreadPoolType> THREAD_POOL_TYPES;
static {
HashMap<String, ThreadPoolType> map = new HashMap<>();
map.put(Names.SAME, ThreadPoolType.DIRECT);
map.put(Names.GENERIC, ThreadPoolType.CACHED);
map.put(Names.LISTENER, ThreadPoolType.FIXED);
map.put(Names.GET, ThreadPoolType.FIXED);
map.put(Names.INDEX, ThreadPoolType.FIXED);
map.put(Names.BULK, ThreadPoolType.FIXED);
map.put(Names.SEARCH, ThreadPoolType.FIXED);
map.put(Names.SUGGEST, ThreadPoolType.FIXED);
map.put(Names.PERCOLATE, ThreadPoolType.FIXED);
map.put(Names.MANAGEMENT, ThreadPoolType.SCALING);
map.put(Names.FLUSH, ThreadPoolType.SCALING);
map.put(Names.REFRESH, ThreadPoolType.SCALING);
map.put(Names.WARMER, ThreadPoolType.SCALING);
map.put(Names.SNAPSHOT, ThreadPoolType.SCALING);
map.put(Names.FORCE_MERGE, ThreadPoolType.FIXED);
map.put(Names.FETCH_SHARD_STARTED, ThreadPoolType.SCALING);
map.put(Names.FETCH_SHARD_STORE, ThreadPoolType.SCALING);
THREAD_POOL_TYPES = Collections.unmodifiableMap(map);
}
private static void add(Map<String, Settings> executorSettings, ExecutorSettingsBuilder builder) {
Settings settings = builder.build();
String name = settings.get("name");
executorSettings.put(name, settings);
}
private static class ExecutorSettingsBuilder {
Map<String, String> settings = new HashMap<>();
public ExecutorSettingsBuilder(String name) {
settings.put("name", name);
settings.put("type", THREAD_POOL_TYPES.get(name).getType());
}
public ExecutorSettingsBuilder size(int availableProcessors) {
return add("size", Integer.toString(availableProcessors));
}
public ExecutorSettingsBuilder queueSize(int queueSize) {
return add("queue_size", Integer.toString(queueSize));
}
public ExecutorSettingsBuilder keepAlive(String keepAlive) {
return add("keep_alive", keepAlive);
}
private ExecutorSettingsBuilder add(String key, String value) {
settings.put(key, value);
return this;
}
public Settings build() {
return settingsBuilder().put(settings).build();
}
}
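// Illustrative comparison (not part of this diff): the SEARCH entry built below via
//   new ExecutorSettingsBuilder(Names.SEARCH).size(((availableProcessors * 3) / 2) + 1).queueSize(1000)
// carries the same keys as the removed chain
//   settingsBuilder().put("type", "fixed").put("size", ((availableProcessors * 3) / 2) + 1).put("queue_size", 1000).build()
// plus a "name" entry, which the add(...) helper above uses as the map key.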
public static final String THREADPOOL_GROUP = "threadpool.";
private volatile Map<String, ExecutorHolder> executors;
@@ -102,7 +188,6 @@ public class ThreadPool extends AbstractComponent {
static final Executor DIRECT_EXECUTOR = command -> command.run();
public ThreadPool(String name) {
this(Settings.builder().put("name", name).build());
}
@@ -112,42 +197,31 @@
assert settings.get("name") != null : "ThreadPool's settings should contain a name";
Map<String, Settings> groupSettings = settings.getGroups(THREADPOOL_GROUP);
Map<String, Settings> groupSettings = getThreadPoolSettingsGroup(settings);
int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings);
int halfProcMaxAt5 = Math.min(((availableProcessors + 1) / 2), 5);
int halfProcMaxAt10 = Math.min(((availableProcessors + 1) / 2), 10);
Map<String, Settings> defaultExecutorTypeSettings = new HashMap<>();
defaultExecutorTypeSettings.put(Names.GENERIC, settingsBuilder().put("type", "cached").put("keep_alive", "30s").build());
defaultExecutorTypeSettings.put(Names.INDEX,
settingsBuilder().put("type", "fixed").put("size", availableProcessors).put("queue_size", 200).build());
defaultExecutorTypeSettings.put(Names.BULK,
settingsBuilder().put("type", "fixed").put("size", availableProcessors).put("queue_size", 50).build());
defaultExecutorTypeSettings.put(Names.GET,
settingsBuilder().put("type", "fixed").put("size", availableProcessors).put("queue_size", 1000).build());
defaultExecutorTypeSettings.put(Names.SEARCH,
settingsBuilder().put("type", "fixed").put("size", ((availableProcessors * 3) / 2) + 1).put("queue_size", 1000).build());
defaultExecutorTypeSettings.put(Names.SUGGEST,
settingsBuilder().put("type", "fixed").put("size", availableProcessors).put("queue_size", 1000).build());
defaultExecutorTypeSettings.put(Names.PERCOLATE,
settingsBuilder().put("type", "fixed").put("size", availableProcessors).put("queue_size", 1000).build());
defaultExecutorTypeSettings .put(Names.MANAGEMENT, settingsBuilder().put("type", "scaling").put("keep_alive", "5m").put("size", 5).build());
add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.GENERIC).keepAlive("30s"));
add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.INDEX).size(availableProcessors).queueSize(200));
add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.BULK).size(availableProcessors).queueSize(50));
add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.GET).size(availableProcessors).queueSize(1000));
add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.SEARCH).size(((availableProcessors * 3) / 2) + 1).queueSize(1000));
add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.SUGGEST).size(availableProcessors).queueSize(1000));
add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.PERCOLATE).size(availableProcessors).queueSize(1000));
add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.MANAGEMENT).size(5).keepAlive("5m"));
// no queue as this means clients will need to handle rejections on listener queue even if the operation succeeded
// the assumption here is that the listeners should be very lightweight on the listeners side
defaultExecutorTypeSettings.put(Names.LISTENER, settingsBuilder().put("type", "fixed").put("size", halfProcMaxAt10).build());
defaultExecutorTypeSettings.put(Names.FLUSH,
settingsBuilder().put("type", "scaling").put("keep_alive", "5m").put("size", halfProcMaxAt5).build());
defaultExecutorTypeSettings.put(Names.REFRESH,
settingsBuilder().put("type", "scaling").put("keep_alive", "5m").put("size", halfProcMaxAt10).build());
defaultExecutorTypeSettings.put(Names.WARMER,
settingsBuilder().put("type", "scaling").put("keep_alive", "5m").put("size", halfProcMaxAt5).build());
defaultExecutorTypeSettings.put(Names.SNAPSHOT,
settingsBuilder().put("type", "scaling").put("keep_alive", "5m").put("size", halfProcMaxAt5).build());
defaultExecutorTypeSettings.put(Names.FORCE_MERGE, settingsBuilder().put("type", "fixed").put("size", 1).build());
defaultExecutorTypeSettings.put(Names.FETCH_SHARD_STARTED,
settingsBuilder().put("type", "scaling").put("keep_alive", "5m").put("size", availableProcessors * 2).build());
defaultExecutorTypeSettings.put(Names.FETCH_SHARD_STORE,
settingsBuilder().put("type", "scaling").put("keep_alive", "5m").put("size", availableProcessors * 2).build());
add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.LISTENER).size(halfProcMaxAt10));
add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.FLUSH).size(halfProcMaxAt5).keepAlive("5m"));
add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.REFRESH).size(halfProcMaxAt10).keepAlive("5m"));
add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.WARMER).size(halfProcMaxAt5).keepAlive("5m"));
add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.SNAPSHOT).size(halfProcMaxAt5).keepAlive("5m"));
add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.FORCE_MERGE).size(1));
add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.FETCH_SHARD_STARTED).size(availableProcessors * 2).keepAlive("5m"));
add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.FETCH_SHARD_STORE).size(availableProcessors * 2).keepAlive("5m"));
this.defaultExecutorTypeSettings = unmodifiableMap(defaultExecutorTypeSettings);
Map<String, ExecutorHolder> executors = new HashMap<>();
@@ -163,8 +237,8 @@ public class ThreadPool extends AbstractComponent {
executors.put(entry.getKey(), build(entry.getKey(), entry.getValue(), Settings.EMPTY));
}
executors.put(Names.SAME, new ExecutorHolder(DIRECT_EXECUTOR, new Info(Names.SAME, "same")));
if (!executors.get(Names.GENERIC).info.getType().equals("cached")) {
executors.put(Names.SAME, new ExecutorHolder(DIRECT_EXECUTOR, new Info(Names.SAME, ThreadPoolType.DIRECT)));
if (!executors.get(Names.GENERIC).info.getThreadPoolType().equals(ThreadPoolType.CACHED)) {
throw new IllegalArgumentException("generic thread pool must be of type cached");
}
this.executors = unmodifiableMap(executors);
@@ -178,6 +252,12 @@ public class ThreadPool extends AbstractComponent {
this.estimatedTimeThread.start();
}
private Map<String, Settings> getThreadPoolSettingsGroup(Settings settings) {
Map<String, Settings> groupSettings = settings.getGroups(THREADPOOL_GROUP);
validate(groupSettings);
return groupSettings;
}
public void setNodeSettingsService(NodeSettingsService nodeSettingsService) {
if(settingsListenerIsSet) {
throw new IllegalStateException("the node settings listener was set more than once");
@@ -326,24 +406,28 @@
settings = Settings.Builder.EMPTY_SETTINGS;
}
Info previousInfo = previousExecutorHolder != null ? previousExecutorHolder.info : null;
String type = settings.get("type", previousInfo != null ? previousInfo.getType() : defaultSettings.get("type"));
String type = settings.get("type", previousInfo != null ? previousInfo.getThreadPoolType().getType() : defaultSettings.get("type"));
ThreadPoolType threadPoolType = ThreadPoolType.fromType(type);
ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(this.settings, name);
if ("same".equals(type)) {
if (ThreadPoolType.DIRECT == threadPoolType) {
if (previousExecutorHolder != null) {
logger.debug("updating thread_pool [{}], type [{}]", name, type);
} else {
logger.debug("creating thread_pool [{}], type [{}]", name, type);
}
return new ExecutorHolder(DIRECT_EXECUTOR, new Info(name, type));
} else if ("cached".equals(type)) {
return new ExecutorHolder(DIRECT_EXECUTOR, new Info(name, threadPoolType));
} else if (ThreadPoolType.CACHED == threadPoolType) {
if (!Names.GENERIC.equals(name)) {
throw new IllegalArgumentException("thread pool type cached is reserved only for the generic thread pool and can not be applied to [" + name + "]");
}
TimeValue defaultKeepAlive = defaultSettings.getAsTime("keep_alive", timeValueMinutes(5));
if (previousExecutorHolder != null) {
if ("cached".equals(previousInfo.getType())) {
if (ThreadPoolType.CACHED == previousInfo.getThreadPoolType()) {
TimeValue updatedKeepAlive = settings.getAsTime("keep_alive", previousInfo.getKeepAlive());
if (!previousInfo.getKeepAlive().equals(updatedKeepAlive)) {
logger.debug("updating thread_pool [{}], type [{}], keep_alive [{}]", name, type, updatedKeepAlive);
((EsThreadPoolExecutor) previousExecutorHolder.executor()).setKeepAliveTime(updatedKeepAlive.millis(), TimeUnit.MILLISECONDS);
return new ExecutorHolder(previousExecutorHolder.executor(), new Info(name, type, -1, -1, updatedKeepAlive, null));
return new ExecutorHolder(previousExecutorHolder.executor(), new Info(name, threadPoolType, -1, -1, updatedKeepAlive, null));
}
return previousExecutorHolder;
}
@@ -358,13 +442,13 @@
logger.debug("creating thread_pool [{}], type [{}], keep_alive [{}]", name, type, keepAlive);
}
Executor executor = EsExecutors.newCached(name, keepAlive.millis(), TimeUnit.MILLISECONDS, threadFactory);
return new ExecutorHolder(executor, new Info(name, type, -1, -1, keepAlive, null));
} else if ("fixed".equals(type)) {
return new ExecutorHolder(executor, new Info(name, threadPoolType, -1, -1, keepAlive, null));
} else if (ThreadPoolType.FIXED == threadPoolType) {
int defaultSize = defaultSettings.getAsInt("size", EsExecutors.boundedNumberOfProcessors(settings));
SizeValue defaultQueueSize = getAsSizeOrUnbounded(defaultSettings, "queue", getAsSizeOrUnbounded(defaultSettings, "queue_size", null));
if (previousExecutorHolder != null) {
if ("fixed".equals(previousInfo.getType())) {
if (ThreadPoolType.FIXED == previousInfo.getThreadPoolType()) {
SizeValue updatedQueueSize = getAsSizeOrUnbounded(settings, "capacity", getAsSizeOrUnbounded(settings, "queue", getAsSizeOrUnbounded(settings, "queue_size", previousInfo.getQueueSize())));
if (Objects.equals(previousInfo.getQueueSize(), updatedQueueSize)) {
int updatedSize = settings.getAsInt("size", previousInfo.getMax());
@@ -378,7 +462,7 @@
((EsThreadPoolExecutor) previousExecutorHolder.executor()).setCorePoolSize(updatedSize);
((EsThreadPoolExecutor) previousExecutorHolder.executor()).setMaximumPoolSize(updatedSize);
}
return new ExecutorHolder(previousExecutorHolder.executor(), new Info(name, type, updatedSize, updatedSize, null, updatedQueueSize));
return new ExecutorHolder(previousExecutorHolder.executor(), new Info(name, threadPoolType, updatedSize, updatedSize, null, updatedQueueSize));
}
return previousExecutorHolder;
}
@@ -393,13 +477,13 @@
SizeValue queueSize = getAsSizeOrUnbounded(settings, "capacity", getAsSizeOrUnbounded(settings, "queue", getAsSizeOrUnbounded(settings, "queue_size", defaultQueueSize)));
logger.debug("creating thread_pool [{}], type [{}], size [{}], queue_size [{}]", name, type, size, queueSize);
Executor executor = EsExecutors.newFixed(name, size, queueSize == null ? -1 : (int) queueSize.singles(), threadFactory);
return new ExecutorHolder(executor, new Info(name, type, size, size, null, queueSize));
} else if ("scaling".equals(type)) {
return new ExecutorHolder(executor, new Info(name, threadPoolType, size, size, null, queueSize));
} else if (ThreadPoolType.SCALING == threadPoolType) {
TimeValue defaultKeepAlive = defaultSettings.getAsTime("keep_alive", timeValueMinutes(5));
int defaultMin = defaultSettings.getAsInt("min", 1);
int defaultSize = defaultSettings.getAsInt("size", EsExecutors.boundedNumberOfProcessors(settings));
if (previousExecutorHolder != null) {
if ("scaling".equals(previousInfo.getType())) {
if (ThreadPoolType.SCALING == previousInfo.getThreadPoolType()) {
TimeValue updatedKeepAlive = settings.getAsTime("keep_alive", previousInfo.getKeepAlive());
int updatedMin = settings.getAsInt("min", previousInfo.getMin());
int updatedSize = settings.getAsInt("max", settings.getAsInt("size", previousInfo.getMax()));
@ -414,7 +498,7 @@ public class ThreadPool extends AbstractComponent {
if (previousInfo.getMax() != updatedSize) {
((EsThreadPoolExecutor) previousExecutorHolder.executor()).setMaximumPoolSize(updatedSize);
}
return new ExecutorHolder(previousExecutorHolder.executor(), new Info(name, type, updatedMin, updatedSize, updatedKeepAlive, null));
return new ExecutorHolder(previousExecutorHolder.executor(), new Info(name, threadPoolType, updatedMin, updatedSize, updatedKeepAlive, null));
}
return previousExecutorHolder;
}
@ -437,13 +521,13 @@ public class ThreadPool extends AbstractComponent {
logger.debug("creating thread_pool [{}], type [{}], min [{}], size [{}], keep_alive [{}]", name, type, min, size, keepAlive);
}
Executor executor = EsExecutors.newScaling(name, min, size, keepAlive.millis(), TimeUnit.MILLISECONDS, threadFactory);
return new ExecutorHolder(executor, new Info(name, type, min, size, keepAlive, null));
return new ExecutorHolder(executor, new Info(name, threadPoolType, min, size, keepAlive, null));
}
throw new IllegalArgumentException("No type found [" + type + "], for [" + name + "]");
}
public void updateSettings(Settings settings) {
Map<String, Settings> groupSettings = settings.getGroups("threadpool");
Map<String, Settings> groupSettings = getThreadPoolSettingsGroup(settings);
if (groupSettings.isEmpty()) {
return;
}
@ -490,6 +574,20 @@ public class ThreadPool extends AbstractComponent {
}
}
private void validate(Map<String, Settings> groupSettings) {
for (String key : groupSettings.keySet()) {
if (!THREAD_POOL_TYPES.containsKey(key)) {
continue;
}
String type = groupSettings.get(key).get("type");
ThreadPoolType correctThreadPoolType = THREAD_POOL_TYPES.get(key);
// TODO: the type equality check can be removed after #3760/#6732 are addressed
if (type != null && !correctThreadPoolType.getType().equals(type)) {
throw new IllegalArgumentException("setting " + THREADPOOL_GROUP + key + ".type to " + type + " is not permitted; must be " + correctThreadPoolType.getType());
}
}
}
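The `getThreadPoolSettingsGroup` helper now called from `updateSettings` above is elided by the hunk; a plausible sketch, assuming it merely resolves the `threadpool` settings group and funnels it through `validate` (shape inferred from the surrounding code, not verbatim):
    private Map<String, Settings> getThreadPoolSettingsGroup(Settings settings) {
        // assumed: THREADPOOL_GROUP is the "threadpool." prefix used in the error message above
        Map<String, Settings> groupSettings = settings.getGroups(THREADPOOL_GROUP);
        validate(groupSettings);
        return groupSettings;
    }
Routing both the constructor and `updateSettings` through this helper would explain why the tests further down see an IllegalArgumentException on both paths.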
/**
* A thread pool size can also be unbounded; this is represented by -1, which SizeValue does not support (it only supports positive numbers)
*/
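The helper this javadoc documents is elided by the hunk; a minimal sketch consistent with its call sites above, assuming the literal "-1" is the unbounded sentinel (inferred, not verbatim):
    private SizeValue getAsSizeOrUnbounded(Settings settings, String setting, SizeValue defaultValue) {
        if ("-1".equals(settings.get(setting))) {
            return null; // null stands in for "unbounded"; SizeValue cannot represent -1
        }
        return settings.getAsSize(setting, defaultValue);
    }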
@ -643,7 +741,7 @@ public class ThreadPool extends AbstractComponent {
public static class Info implements Streamable, ToXContent {
private String name;
private String type;
private ThreadPoolType type;
private int min;
private int max;
private TimeValue keepAlive;
@ -653,15 +751,15 @@ public class ThreadPool extends AbstractComponent {
}
public Info(String name, String type) {
public Info(String name, ThreadPoolType type) {
this(name, type, -1);
}
public Info(String name, String type, int size) {
public Info(String name, ThreadPoolType type, int size) {
this(name, type, size, size, null, null);
}
public Info(String name, String type, int min, int max, @Nullable TimeValue keepAlive, @Nullable SizeValue queueSize) {
public Info(String name, ThreadPoolType type, int min, int max, @Nullable TimeValue keepAlive, @Nullable SizeValue queueSize) {
this.name = name;
this.type = type;
this.min = min;
@ -674,7 +772,7 @@ public class ThreadPool extends AbstractComponent {
return this.name;
}
public String getType() {
public ThreadPoolType getThreadPoolType() {
return this.type;
}
@ -699,7 +797,7 @@ public class ThreadPool extends AbstractComponent {
@Override
public void readFrom(StreamInput in) throws IOException {
name = in.readString();
type = in.readString();
type = ThreadPoolType.fromType(in.readString());
min = in.readInt();
max = in.readInt();
if (in.readBoolean()) {
@ -716,7 +814,7 @@ public class ThreadPool extends AbstractComponent {
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(name);
out.writeString(type);
out.writeString(type.getType());
out.writeInt(min);
out.writeInt(max);
if (keepAlive == null) {
@ -739,7 +837,7 @@ public class ThreadPool extends AbstractComponent {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(name, XContentBuilder.FieldCaseConversion.NONE);
builder.field(Fields.TYPE, type);
builder.field(Fields.TYPE, type.getType());
if (min != -1) {
builder.field(Fields.MIN, min);
}
@ -814,4 +912,37 @@ public class ThreadPool extends AbstractComponent {
return false;
}
public static ThreadPoolTypeSettingsValidator THREAD_POOL_TYPE_SETTINGS_VALIDATOR = new ThreadPoolTypeSettingsValidator();
private static class ThreadPoolTypeSettingsValidator implements Validator {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
// TODO: the type equality validation can be removed after #3760/#6732 are addressed
Matcher matcher = Pattern.compile("threadpool\\.(.*)\\.type").matcher(setting);
if (!matcher.matches()) {
return null;
} else {
String threadPool = matcher.group(1);
ThreadPool.ThreadPoolType defaultThreadPoolType = ThreadPool.THREAD_POOL_TYPES.get(threadPool);
ThreadPool.ThreadPoolType threadPoolType;
try {
threadPoolType = ThreadPool.ThreadPoolType.fromType(value);
} catch (IllegalArgumentException e) {
return e.getMessage();
}
if (defaultThreadPoolType.equals(threadPoolType)) {
return null;
} else {
return String.format(
Locale.ROOT,
"thread pool type for [%s] can only be updated to [%s] but was [%s]",
threadPool,
defaultThreadPoolType.getType(),
threadPoolType.getType()
);
}
}
}
}
}
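For illustration, a hypothetical invocation of the validator above (not part of the commit); the search pool's built-in type is fixed, as the integration test further down confirms:
    // rejected: "search" defaults to a fixed pool, so any other type yields an error message
    String rejected = ThreadPool.THREAD_POOL_TYPE_SETTINGS_VALIDATOR
            .validate("threadpool.search.type", "cached", ClusterState.PROTO);
    // rejected == "thread pool type for [search] can only be updated to [fixed] but was [cached]"
    String accepted = ThreadPool.THREAD_POOL_TYPE_SETTINGS_VALIDATOR
            .validate("threadpool.search.type", "fixed", ClusterState.PROTO);
    // accepted == null: re-asserting the built-in type is permitted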

View File

@ -19,13 +19,16 @@
/*
* Limited security policy for scripts.
* This is what is needed for invokeDynamic functionality to work.
* This is what is needed for basic functionality to work.
*/
grant {
// groovy IndyInterface bootstrap requires this property for indy logging
permission java.util.PropertyPermission "groovy.indy.logging", "read";
// needed by Rhino engine exception handling
permission java.util.PropertyPermission "rhino.stack.style", "read";
// needed by IndyInterface selectMethod (setCallSiteTarget)
permission java.lang.RuntimePermission "getClassLoader";
};
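A minimal sketch of the calls these grants authorize once a SecurityManager is installed (illustrative only; the property names come from the grants above, and the getClassLoader example is an assumption about how the indy bootstrap exercises that permission):
    // each read below maps to one grant; without it the script engine would
    // fail with an AccessControlException under the SecurityManager
    String indyLogging = System.getProperty("groovy.indy.logging");       // PropertyPermission "read"
    String stackStyle = System.getProperty("rhino.stack.style");          // PropertyPermission "read"
    ClassLoader loader = Thread.currentThread().getContextClassLoader();  // RuntimePermission "getClassLoader"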

View File

@ -140,7 +140,7 @@ public class ESExceptionTests extends ESTestCase {
new SearchShardTarget("node_1", "foo", 1));
ShardSearchFailure failure1 = new ShardSearchFailure(new ParsingException(1, 2, "foobar", null),
new SearchShardTarget("node_1", "foo", 2));
SearchPhaseExecutionException ex = new SearchPhaseExecutionException("search", "all shards failed", new ShardSearchFailure[]{failure, failure1});
SearchPhaseExecutionException ex = new SearchPhaseExecutionException("search", "all shards failed", randomBoolean() ? failure1.getCause() : failure.getCause(), new ShardSearchFailure[]{failure, failure1});
XContentBuilder builder = XContentFactory.jsonBuilder();
builder.startObject();
ex.toXContent(builder, PARAMS);
@ -163,6 +163,21 @@ public class ESExceptionTests extends ESTestCase {
String expected = "{\"type\":\"search_phase_execution_exception\",\"reason\":\"all shards failed\",\"phase\":\"search\",\"grouped\":true,\"failed_shards\":[{\"shard\":1,\"index\":\"foo\",\"node\":\"node_1\",\"reason\":{\"type\":\"parsing_exception\",\"reason\":\"foobar\",\"line\":1,\"col\":2}},{\"shard\":1,\"index\":\"foo1\",\"node\":\"node_1\",\"reason\":{\"type\":\"query_shard_exception\",\"reason\":\"foobar\",\"index\":\"foo1\"}}]}";
assertEquals(expected, builder.string());
}
{
ShardSearchFailure failure = new ShardSearchFailure(new ParsingException(1, 2, "foobar", null),
new SearchShardTarget("node_1", "foo", 1));
ShardSearchFailure failure1 = new ShardSearchFailure(new ParsingException(1, 2, "foobar", null),
new SearchShardTarget("node_1", "foo", 2));
NullPointerException nullPointerException = new NullPointerException();
SearchPhaseExecutionException ex = new SearchPhaseExecutionException("search", "all shards failed", nullPointerException, new ShardSearchFailure[]{failure, failure1});
assertEquals(nullPointerException, ex.getCause());
XContentBuilder builder = XContentFactory.jsonBuilder();
builder.startObject();
ex.toXContent(builder, PARAMS);
builder.endObject();
String expected = "{\"type\":\"search_phase_execution_exception\",\"reason\":\"all shards failed\",\"phase\":\"search\",\"grouped\":true,\"failed_shards\":[{\"shard\":1,\"index\":\"foo\",\"node\":\"node_1\",\"reason\":{\"type\":\"parsing_exception\",\"reason\":\"foobar\",\"line\":1,\"col\":2}}],\"caused_by\":{\"type\":\"null_pointer_exception\",\"reason\":null}}";
assertEquals(expected, builder.string());
}
}
public void testGetRootCause() {

View File

@ -70,7 +70,7 @@ public class NativeNaiveTFIDFScoreScript extends AbstractSearchScript {
score += indexFieldTerm.tf() * indexField.docCount() / indexFieldTerm.df();
}
} catch (IOException e) {
throw new RuntimeException();
throw new RuntimeException(e);
}
}
return score;

View File

@ -43,7 +43,7 @@ public class CountDownTests extends ESTestCase {
try {
latch.await();
} catch (InterruptedException e) {
throw new RuntimeException();
throw new RuntimeException(e);
}
while (true) {
if(frequently()) {

View File

@ -214,7 +214,7 @@ public class NettyHttpServerPipeliningTests extends ESTestCase {
Thread.sleep(timeout);
} catch (InterruptedException e1) {
Thread.currentThread().interrupt();
throw new RuntimeException();
throw new RuntimeException(e1);
}
}

View File

@ -20,10 +20,12 @@
package org.elasticsearch.index.query;
import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator;
import com.fasterxml.jackson.core.JsonParseException;
import com.fasterxml.jackson.core.io.JsonStringEncoder;
import org.apache.lucene.search.Query;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.action.get.GetRequest;
@ -78,6 +80,7 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.script.*;
import org.elasticsearch.script.Script.ScriptParseException;
import org.elasticsearch.script.mustache.MustacheScriptEngineService;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.test.ESTestCase;
@ -361,6 +364,29 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
}
}
/**
* Test that inserting an additional object into an otherwise correct query string
* always triggers some kind of parsing exception.
*/
public void testUnknownObjectException() throws IOException {
String validQuery = createTestQueryBuilder().toString();
assertThat(validQuery, containsString("{"));
for (int insertionPosition = 0; insertionPosition < validQuery.length(); insertionPosition++) {
if (validQuery.charAt(insertionPosition) == '{') {
String testQuery = validQuery.substring(0, insertionPosition) + "{ \"newField\" : " + validQuery.substring(insertionPosition) + "}";
try {
parseQuery(testQuery);
fail("some parsing exception expected for query: " + testQuery);
} catch (ParsingException | ScriptParseException | ElasticsearchParseException e) {
// different kinds of exception wordings depending on location
// of mutation, so no simple asserts possible here
} catch (JsonParseException e) {
// mutation produced invalid json
}
}
}
}
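As an illustration of the mutation performed above (hypothetical query, not taken from the test fixtures):
    // with a valid query such as
    String validQuery = "{\"term\": {\"user\": \"kimchy\"}}";
    // inserting at the second '{' and appending the trailing '}' yields
    String testQuery = "{\"term\": { \"newField\" : {\"user\": \"kimchy\"}}}";
    // parseQuery(testQuery) is then expected to throw a ParsingException,
    // because "newField" is not a valid object at that position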
/**
* Returns alternate string representations of the query that need to be tested, as they are never used as output
* of {@link QueryBuilder#toXContent(XContentBuilder, ToXContent.Params)}. By default there are no alternate versions.
@ -512,7 +538,7 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
testQuery.writeTo(output);
try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(output.bytes()), namedWriteableRegistry)) {
QueryBuilder<?> prototype = queryParser(testQuery.getName()).getBuilderPrototype();
QueryBuilder<?> deserializedQuery = prototype.readFrom(in);
QueryBuilder deserializedQuery = prototype.readFrom(in);
assertEquals(deserializedQuery, testQuery);
assertEquals(deserializedQuery.hashCode(), testQuery.hashCode());
assertNotSame(deserializedQuery, testQuery);

View File

@ -20,11 +20,15 @@
package org.elasticsearch.index.query;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import com.fasterxml.jackson.core.JsonParseException;
import org.apache.lucene.queries.TermsQuery;
import org.apache.lucene.search.*;
import org.apache.lucene.search.join.ScoreMode;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
@ -35,6 +39,7 @@ import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
import org.elasticsearch.index.query.support.QueryInnerHits;
import org.elasticsearch.script.Script.ScriptParseException;
import org.elasticsearch.search.fetch.innerhits.InnerHitsBuilder;
import org.elasticsearch.search.fetch.innerhits.InnerHitsContext;
import org.elasticsearch.search.internal.SearchContext;
@ -44,7 +49,8 @@ import org.elasticsearch.test.TestSearchContext;
import java.io.IOException;
import java.util.Collections;
import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.instanceOf;
@ -52,6 +58,7 @@ public class HasChildQueryBuilderTests extends AbstractQueryTestCase<HasChildQue
protected static final String PARENT_TYPE = "parent";
protected static final String CHILD_TYPE = "child";
@Override
public void setUp() throws Exception {
super.setUp();
MapperService mapperService = queryShardContext().getMapperService();
@ -74,6 +81,7 @@ public class HasChildQueryBuilderTests extends AbstractQueryTestCase<HasChildQue
).string()), false, false);
}
@Override
protected void setSearchContext(String[] types) {
final MapperService mapperService = queryShardContext().getMapperService();
final IndexFieldDataService fieldData = indexFieldDataService();
@ -303,4 +311,33 @@ public class HasChildQueryBuilderTests extends AbstractQueryTestCase<HasChildQue
assertThat(typeTermQuery.getTerm().field(), equalTo(TypeFieldMapper.NAME));
assertThat(typeTermQuery.getTerm().text(), equalTo(type));
}
/**
* Overrides the superclass test because we need to make sure the mutation does not happen inside
* the `inner_hits` structure: inner hits are not parsed yet, so no exception would be triggered
* for any mutation there.
*/
@Override
public void testUnknownObjectException() throws IOException {
String validQuery = createTestQueryBuilder().toString();
assertThat(validQuery, containsString("{"));
int endPosition = validQuery.indexOf("inner_hits");
if (endPosition == -1) {
endPosition = validQuery.length() - 1;
}
for (int insertionPosition = 0; insertionPosition < endPosition; insertionPosition++) {
if (validQuery.charAt(insertionPosition) == '{') {
String testQuery = validQuery.substring(0, insertionPosition) + "{ \"newField\" : " + validQuery.substring(insertionPosition) + "}";
try {
parseQuery(testQuery);
fail("some parsing exception expected for query: " + testQuery);
} catch (ParsingException | ScriptParseException | ElasticsearchParseException e) {
// different kinds of exception wordings depending on location
// of mutation, so no simple asserts possible here
} catch (JsonParseException e) {
// mutation produced invalid json
}
}
}
}
}

View File

@ -20,10 +20,14 @@
package org.elasticsearch.index.query;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import com.fasterxml.jackson.core.JsonParseException;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.join.ScoreMode;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
@ -31,6 +35,7 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.support.QueryInnerHits;
import org.elasticsearch.script.Script.ScriptParseException;
import org.elasticsearch.search.fetch.innerhits.InnerHitsBuilder;
import org.elasticsearch.search.fetch.innerhits.InnerHitsContext;
import org.elasticsearch.search.internal.SearchContext;
@ -40,6 +45,8 @@ import org.elasticsearch.test.TestSearchContext;
import java.io.IOException;
import java.util.Arrays;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.instanceOf;
@ -47,6 +54,7 @@ public class HasParentQueryBuilderTests extends AbstractQueryTestCase<HasParentQ
protected static final String PARENT_TYPE = "parent";
protected static final String CHILD_TYPE = "child";
@Override
public void setUp() throws Exception {
super.setUp();
MapperService mapperService = queryShardContext().getMapperService();
@ -69,6 +77,7 @@ public class HasParentQueryBuilderTests extends AbstractQueryTestCase<HasParentQ
).string()), false, false);
}
@Override
protected void setSearchContext(String[] types) {
final MapperService mapperService = queryShardContext().getMapperService();
final IndexFieldDataService fieldData = indexFieldDataService();
@ -204,4 +213,33 @@ public class HasParentQueryBuilderTests extends AbstractQueryTestCase<HasParentQ
assertThat(QueryShardContext.getTypes(), equalTo(searchTypes));
HasChildQueryBuilderTests.assertLateParsingQuery(query, PARENT_TYPE, "id");
}
/**
* Overrides the superclass test because we need to make sure the mutation does not happen inside
* the `inner_hits` structure: inner hits are not parsed yet, so no exception would be triggered
* for any mutation there.
*/
@Override
public void testUnknownObjectException() throws IOException {
String validQuery = createTestQueryBuilder().toString();
assertThat(validQuery, containsString("{"));
int endPosition = validQuery.indexOf("inner_hits");
if (endPosition == -1) {
endPosition = validQuery.length() - 1;
}
for (int insertionPosition = 0; insertionPosition < endPosition; insertionPosition++) {
if (validQuery.charAt(insertionPosition) == '{') {
String testQuery = validQuery.substring(0, insertionPosition) + "{ \"newField\" : " + validQuery.substring(insertionPosition) + "}";
try {
parseQuery(testQuery);
fail("some parsing exception expected for query: " + testQuery);
} catch (ParsingException | ScriptParseException | ElasticsearchParseException e) {
// different kinds of exception wordings depending on location
// of mutation, so no simple asserts possible here
} catch (JsonParseException e) {
// mutation produced invalid json
}
}
}
}
}

View File

@ -27,38 +27,30 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Locale;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.TreeMap;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
public class SimpleQueryStringBuilderTests extends AbstractQueryTestCase<SimpleQueryStringBuilder> {
private String[] queryTerms;
@Override
protected SimpleQueryStringBuilder doCreateTestQueryBuilder() {
int numberOfTerms = randomIntBetween(1, 5);
queryTerms = new String[numberOfTerms];
StringBuilder queryString = new StringBuilder();
for (int i = 0; i < numberOfTerms; i++) {
queryTerms[i] = randomAsciiOfLengthBetween(1, 10);
queryString.append(queryTerms[i] + " ");
}
SimpleQueryStringBuilder result = new SimpleQueryStringBuilder(queryString.toString().trim());
SimpleQueryStringBuilder result = new SimpleQueryStringBuilder(randomAsciiOfLengthBetween(1, 10));
if (randomBoolean()) {
result.analyzeWildcard(randomBoolean());
}
@ -82,13 +74,9 @@ public class SimpleQueryStringBuilderTests extends AbstractQueryTestCase<SimpleQ
}
if (randomBoolean()) {
Set<SimpleQueryStringFlag> flagSet = new HashSet<>();
if (numberOfTerms > 1) {
flagSet.add(SimpleQueryStringFlag.WHITESPACE);
}
int size = randomIntBetween(0, SimpleQueryStringFlag.values().length);
for (int i = 0; i < size; i++) {
SimpleQueryStringFlag randomFlag = randomFrom(SimpleQueryStringFlag.values());
flagSet.add(randomFlag);
flagSet.add(randomFrom(SimpleQueryStringFlag.values()));
}
if (flagSet.size() > 0) {
result.flags(flagSet.toArray(new SimpleQueryStringFlag[flagSet.size()]));
@ -99,12 +87,13 @@ public class SimpleQueryStringBuilderTests extends AbstractQueryTestCase<SimpleQ
Map<String, Float> fields = new HashMap<>();
for (int i = 0; i < fieldCount; i++) {
if (randomBoolean()) {
fields.put("f" + i + "_" + randomAsciiOfLengthBetween(1, 10), AbstractQueryBuilder.DEFAULT_BOOST);
fields.put(randomAsciiOfLengthBetween(1, 10), AbstractQueryBuilder.DEFAULT_BOOST);
} else {
fields.put(randomBoolean() ? STRING_FIELD_NAME : "f" + i + "_" + randomAsciiOfLengthBetween(1, 10), 2.0f / randomIntBetween(1, 20));
fields.put(randomBoolean() ? STRING_FIELD_NAME : randomAsciiOfLengthBetween(1, 10), 2.0f / randomIntBetween(1, 20));
}
}
result.fields(fields);
return result;
}
@ -259,7 +248,7 @@ public class SimpleQueryStringBuilderTests extends AbstractQueryTestCase<SimpleQ
"}";
XContentParser parser = XContentFactory.xContent(contentString).createParser(contentString);
context.reset(parser);
SimpleQueryStringBuilder queryBuilder = new SimpleQueryStringParser().fromXContent(context);
SimpleQueryStringBuilder queryBuilder = (SimpleQueryStringBuilder) parseQuery(parser, ParseFieldMatcher.EMPTY);
assertThat(queryBuilder.value(), equalTo(query));
assertThat(queryBuilder.fields(), notNullValue());
assertThat(queryBuilder.fields().size(), equalTo(0));
@ -269,8 +258,8 @@ public class SimpleQueryStringBuilderTests extends AbstractQueryTestCase<SimpleQ
// no strict field resolution (version before V_1_4_0_Beta1)
if (getCurrentTypes().length > 0 || shardContext.indexVersionCreated().before(Version.V_1_4_0_Beta1)) {
Query luceneQuery = queryBuilder.toQuery(shardContext);
assertThat(luceneQuery, instanceOf(BooleanQuery.class));
TermQuery termQuery = (TermQuery) ((BooleanQuery) luceneQuery).clauses().get(0).getQuery();
assertThat(luceneQuery, instanceOf(TermQuery.class));
TermQuery termQuery = (TermQuery) luceneQuery;
assertThat(termQuery.getTerm(), equalTo(new Term(MetaData.ALL, query)));
}
}
@ -288,7 +277,7 @@ public class SimpleQueryStringBuilderTests extends AbstractQueryTestCase<SimpleQ
if ("".equals(queryBuilder.value())) {
assertTrue("Query should have been MatchNoDocsQuery but was " + query.getClass().getName(), query instanceof MatchNoDocsQuery);
} else {
} else if (queryBuilder.fields().size() > 1) {
assertTrue("Query should have been BooleanQuery but was " + query.getClass().getName(), query instanceof BooleanQuery);
BooleanQuery boolQuery = (BooleanQuery) query;
@ -301,42 +290,32 @@ public class SimpleQueryStringBuilderTests extends AbstractQueryTestCase<SimpleQ
}
}
assertThat(boolQuery.clauses().size(), equalTo(queryTerms.length));
Map<String, Float> expectedFields = new TreeMap<String, Float>(queryBuilder.fields());
if (expectedFields.size() == 0) {
expectedFields.put(MetaData.ALL, AbstractQueryBuilder.DEFAULT_BOOST);
}
for (int i = 0; i < queryTerms.length; i++) {
BooleanClause booleanClause = boolQuery.clauses().get(i);
Iterator<Entry<String, Float>> fieldsIter = expectedFields.entrySet().iterator();
if (queryTerms.length == 1 && expectedFields.size() == 1) {
assertThat(boolQuery.clauses().size(), equalTo(queryBuilder.fields().size()));
Iterator<String> fields = queryBuilder.fields().keySet().iterator();
for (BooleanClause booleanClause : boolQuery) {
assertThat(booleanClause.getQuery(), instanceOf(TermQuery.class));
TermQuery termQuery = (TermQuery) booleanClause.getQuery();
Entry<String, Float> entry = fieldsIter.next();
assertThat(termQuery.getTerm().field(), equalTo(entry.getKey()));
assertThat(termQuery.getBoost(), equalTo(entry.getValue()));
assertThat(termQuery.getTerm().text().toLowerCase(Locale.ROOT), equalTo(queryTerms[i].toLowerCase(Locale.ROOT)));
} else {
assertThat(booleanClause.getQuery(), instanceOf(BooleanQuery.class));
for (BooleanClause clause : ((BooleanQuery) booleanClause.getQuery()).clauses()) {
TermQuery termQuery = (TermQuery) clause.getQuery();
Entry<String, Float> entry = fieldsIter.next();
assertThat(termQuery.getTerm().field(), equalTo(entry.getKey()));
assertThat(termQuery.getBoost(), equalTo(entry.getValue()));
assertThat(termQuery.getTerm().text().toLowerCase(Locale.ROOT), equalTo(queryTerms[i].toLowerCase(Locale.ROOT)));
}
}
assertThat(termQuery.getTerm().field(), equalTo(fields.next()));
assertThat(termQuery.getTerm().text().toLowerCase(Locale.ROOT), equalTo(queryBuilder.value().toLowerCase(Locale.ROOT)));
}
if (queryBuilder.minimumShouldMatch() != null) {
int optionalClauses = queryTerms.length;
if (queryBuilder.defaultOperator().equals(Operator.AND) && queryTerms.length > 1) {
optionalClauses = 0;
assertThat(boolQuery.getMinimumNumberShouldMatch(), greaterThan(0));
}
int expectedMinimumShouldMatch = Queries.calculateMinShouldMatch(optionalClauses, queryBuilder.minimumShouldMatch());
assertEquals(expectedMinimumShouldMatch, boolQuery.getMinimumNumberShouldMatch());
} else if (queryBuilder.fields().size() <= 1) {
assertTrue("Query should have been TermQuery but was " + query.getClass().getName(), query instanceof TermQuery);
TermQuery termQuery = (TermQuery) query;
String field;
if (queryBuilder.fields().size() == 0) {
field = MetaData.ALL;
} else {
field = queryBuilder.fields().keySet().iterator().next();
}
assertThat(termQuery.getTerm().field(), equalTo(field));
assertThat(termQuery.getTerm().text().toLowerCase(Locale.ROOT), equalTo(queryBuilder.value().toLowerCase(Locale.ROOT)));
} else {
fail("Encountered lucene query type we do not have a validation implementation for in our " + SimpleQueryStringBuilderTests.class.getSimpleName());
}
}
@ -362,18 +341,15 @@ public class SimpleQueryStringBuilderTests extends AbstractQueryTestCase<SimpleQ
SimpleQueryStringBuilder simpleQueryStringBuilder = new SimpleQueryStringBuilder("test");
simpleQueryStringBuilder.field(STRING_FIELD_NAME, 5);
Query query = simpleQueryStringBuilder.toQuery(shardContext);
assertThat(query, instanceOf(BooleanQuery.class));
TermQuery wrappedQuery = (TermQuery) ((BooleanQuery) query).clauses().get(0).getQuery();
assertThat(wrappedQuery.getBoost(), equalTo(5f));
assertThat(query, instanceOf(TermQuery.class));
assertThat(query.getBoost(), equalTo(5f));
simpleQueryStringBuilder = new SimpleQueryStringBuilder("test");
simpleQueryStringBuilder.field(STRING_FIELD_NAME, 5);
simpleQueryStringBuilder.boost(2);
query = simpleQueryStringBuilder.toQuery(shardContext);
assertThat(query.getBoost(), equalTo(2f));
assertThat(query, instanceOf(BooleanQuery.class));
wrappedQuery = (TermQuery) ((BooleanQuery) query).clauses().get(0).getQuery();
assertThat(wrappedQuery.getBoost(), equalTo(5f));
assertThat(query, instanceOf(TermQuery.class));
assertThat(query.getBoost(), equalTo(10f));
}
public void testNegativeFlags() throws IOException {
@ -385,39 +361,4 @@ public class SimpleQueryStringBuilderTests extends AbstractQueryTestCase<SimpleQ
otherBuilder.flags(-1);
assertThat(builder, equalTo(otherBuilder));
}
public void testMinimumShouldMatch() throws IOException {
QueryShardContext shardContext = createShardContext();
int numberOfTerms = randomIntBetween(1, 4);
int numberOfFields = randomIntBetween(1, 4);
StringBuilder queryString = new StringBuilder();
for (int i = 0; i < numberOfTerms; i++) {
queryString.append("t" + i + " ");
}
SimpleQueryStringBuilder simpleQueryStringBuilder = new SimpleQueryStringBuilder(queryString.toString().trim());
if (randomBoolean()) {
simpleQueryStringBuilder.defaultOperator(Operator.AND);
}
for (int i = 0; i < numberOfFields; i++) {
simpleQueryStringBuilder.field("f" + i);
}
int percent = randomIntBetween(1, 100);
simpleQueryStringBuilder.minimumShouldMatch(percent + "%");
BooleanQuery query = (BooleanQuery) simpleQueryStringBuilder.toQuery(shardContext);
assertEquals("query should have one should clause per term", numberOfTerms, query.clauses().size());
int expectedMinimumShouldMatch = numberOfTerms * percent / 100;
if (simpleQueryStringBuilder.defaultOperator().equals(Operator.AND) && numberOfTerms > 1) {
expectedMinimumShouldMatch = 0;
}
assertEquals(expectedMinimumShouldMatch, query.getMinimumNumberShouldMatch());
for (BooleanClause clause : query.clauses()) {
if (numberOfFields == 1 && numberOfTerms == 1) {
assertTrue(clause.getQuery() instanceof TermQuery);
} else {
assertEquals(numberOfFields, ((BooleanQuery) clause.getQuery()).clauses().size());
}
}
}
}

View File

@ -307,7 +307,7 @@ public class TemplateQueryIT extends ESIntegTestCase {
templateParams)).get();
fail("Expected SearchPhaseExecutionException");
} catch (SearchPhaseExecutionException e) {
assertThat(e.getCause().getMessage(), containsString("Illegal index script format"));
assertThat(e.toString(), containsString("Illegal index script format"));
}
}

View File

@ -42,7 +42,10 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import static org.hamcrest.Matchers.*;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.containsString;
public class TermsQueryBuilderTests extends AbstractQueryTestCase<TermsQueryBuilder> {
private List<Object> randomTerms;
@ -195,9 +198,9 @@ public class TermsQueryBuilderTests extends AbstractQueryTestCase<TermsQueryBuil
"}";
try {
parseQuery(query);
fail("Expected IllegalArgumentException");
} catch(IllegalArgumentException e) {
assertThat(e.getMessage(), is("Both values and termsLookup specified for terms query"));
fail("Expected ParsingException");
} catch(ParsingException e) {
assertThat(e.getMessage(), containsString("[" + TermsQueryBuilder.NAME + "] query does not support more than one field."));
}
}
@ -251,7 +254,7 @@ public class TermsQueryBuilderTests extends AbstractQueryTestCase<TermsQueryBuil
parseQuery(query);
fail("parsing should have failed");
} catch (ParsingException ex) {
assertThat(ex.getMessage(), equalTo("[terms] query does not support multiple fields"));
assertThat(ex.getMessage(), equalTo("[" + TermsQueryBuilder.NAME + "] query does not support multiple fields"));
}
}
}

View File

@ -37,7 +37,6 @@ public class SearchWithRejectionsIT extends ESIntegTestCase {
@Override
public Settings nodeSettings(int nodeOrdinal) {
return settingsBuilder().put(super.nodeSettings(nodeOrdinal))
.put("threadpool.search.type", "fixed")
.put("threadpool.search.size", 1)
.put("threadpool.search.queue_size", 1)
.build();

View File

@ -109,6 +109,7 @@ public class SimpleQueryStringIT extends ESIntegTestCase {
client().prepareIndex("test", "type1", "3").setSource("body", "foo bar"),
client().prepareIndex("test", "type1", "4").setSource("body", "foo baz bar"));
logger.info("--> query 1");
SearchResponse searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("foo bar").minimumShouldMatch("2")).get();
assertHitCount(searchResponse, 2l);
@ -119,13 +120,7 @@ public class SimpleQueryStringIT extends ESIntegTestCase {
assertHitCount(searchResponse, 2l);
assertSearchHits(searchResponse, "3", "4");
logger.info("--> query 3"); // test case from #13884
searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("foo")
.field("body").field("body2").field("body3").minimumShouldMatch("-50%")).get();
assertHitCount(searchResponse, 3l);
assertSearchHits(searchResponse, "1", "3", "4");
logger.info("--> query 4");
logger.info("--> query 3");
searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("foo bar baz").field("body").field("body2").minimumShouldMatch("70%")).get();
assertHitCount(searchResponse, 2l);
assertSearchHits(searchResponse, "3", "4");
@ -136,17 +131,17 @@ public class SimpleQueryStringIT extends ESIntegTestCase {
client().prepareIndex("test", "type1", "7").setSource("body2", "foo bar", "other", "foo"),
client().prepareIndex("test", "type1", "8").setSource("body2", "foo baz bar", "other", "foo"));
logger.info("--> query 5");
logger.info("--> query 4");
searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("foo bar").field("body").field("body2").minimumShouldMatch("2")).get();
assertHitCount(searchResponse, 4l);
assertSearchHits(searchResponse, "3", "4", "7", "8");
logger.info("--> query 6");
logger.info("--> query 5");
searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("foo bar").minimumShouldMatch("2")).get();
assertHitCount(searchResponse, 5l);
assertSearchHits(searchResponse, "3", "4", "6", "7", "8");
logger.info("--> query 7");
logger.info("--> query 6");
searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("foo bar baz").field("body2").field("other").minimumShouldMatch("70%")).get();
assertHitCount(searchResponse, 3l);
assertSearchHits(searchResponse, "6", "7", "8");

View File

@ -46,20 +46,13 @@ import java.lang.management.ThreadMXBean;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.Executor;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.*;
import java.util.regex.Pattern;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.sameInstance;
import static org.hamcrest.Matchers.*;
/**
*/
@ -67,7 +60,7 @@ import static org.hamcrest.Matchers.sameInstance;
public class SimpleThreadPoolIT extends ESIntegTestCase {
@Override
protected Settings nodeSettings(int nodeOrdinal) {
return Settings.settingsBuilder().put(super.nodeSettings(nodeOrdinal)).put("threadpool.search.type", "cached").build();
return Settings.settingsBuilder().build();
}
public void testThreadNames() throws Exception {
@ -130,16 +123,14 @@ public class SimpleThreadPoolIT extends ESIntegTestCase {
internalCluster().startNodesAsync(2).get();
ThreadPool threadPool = internalCluster().getDataNodeInstance(ThreadPool.class);
// Check that settings are changed
assertThat(((ThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(5L));
client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put("threadpool.search.keep_alive", "10m").build()).execute().actionGet();
assertThat(((ThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(10L));
assertThat(((ThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getQueue().remainingCapacity(), equalTo(1000));
client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put("threadpool.search.queue_size", 2000).build()).execute().actionGet();
assertThat(((ThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getQueue().remainingCapacity(), equalTo(2000));
// Make sure that threads continue executing when executor is replaced
final CyclicBarrier barrier = new CyclicBarrier(2);
Executor oldExecutor = threadPool.executor(Names.SEARCH);
threadPool.executor(Names.SEARCH).execute(new Runnable() {
@Override
public void run() {
threadPool.executor(Names.SEARCH).execute(() -> {
try {
barrier.await();
} catch (InterruptedException ex) {
@ -147,9 +138,8 @@ public class SimpleThreadPoolIT extends ESIntegTestCase {
} catch (BrokenBarrierException ex) {
//
}
}
});
client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put("threadpool.search.type", "fixed").build()).execute().actionGet();
client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put("threadpool.search.queue_size", 1000).build()).execute().actionGet();
assertThat(threadPool.executor(Names.SEARCH), not(sameInstance(oldExecutor)));
assertThat(((ThreadPoolExecutor) oldExecutor).isShutdown(), equalTo(true));
assertThat(((ThreadPoolExecutor) oldExecutor).isTerminating(), equalTo(true));
@ -157,9 +147,7 @@ public class SimpleThreadPoolIT extends ESIntegTestCase {
barrier.await(10, TimeUnit.SECONDS);
// Make sure that new thread executor is functional
threadPool.executor(Names.SEARCH).execute(new Runnable() {
@Override
public void run() {
threadPool.executor(Names.SEARCH).execute(() -> {
try {
barrier.await();
} catch (InterruptedException ex) {
@ -168,13 +156,10 @@ public class SimpleThreadPoolIT extends ESIntegTestCase {
//
}
}
});
client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put("threadpool.search.type", "fixed").build()).execute().actionGet();
);
client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put("threadpool.search.queue_size", 500)).execute().actionGet();
barrier.await(10, TimeUnit.SECONDS);
// This was here: Thread.sleep(200);
// Why? What was it for?
// Check that node info is correct
NodesInfoResponse nodesInfoResponse = client().admin().cluster().prepareNodesInfo().all().execute().actionGet();
for (int i = 0; i < 2; i++) {
@ -182,7 +167,7 @@ public class SimpleThreadPoolIT extends ESIntegTestCase {
boolean found = false;
for (ThreadPool.Info info : nodeInfo.getThreadPool()) {
if (info.getName().equals(Names.SEARCH)) {
assertThat(info.getType(), equalTo("fixed"));
assertEquals(info.getThreadPoolType(), ThreadPool.ThreadPoolType.FIXED);
found = true;
break;
}

View File

@ -18,6 +18,7 @@
*/
package org.elasticsearch.threadpool;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.Version;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
@ -30,7 +31,9 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.ESTestCase;
import org.junit.Before;
import java.io.IOException;
import java.util.Map;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
@ -44,9 +47,16 @@ import static org.hamcrest.Matchers.nullValue;
*/
public class ThreadPoolSerializationTests extends ESTestCase {
BytesStreamOutput output = new BytesStreamOutput();
private ThreadPool.ThreadPoolType threadPoolType;
@Before
public void setUp() throws Exception {
super.setUp();
threadPoolType = randomFrom(ThreadPool.ThreadPoolType.values());
}
public void testThatQueueSizeSerializationWorks() throws Exception {
ThreadPool.Info info = new ThreadPool.Info("foo", "search", 1, 10, TimeValue.timeValueMillis(3000), SizeValue.parseSizeValue("10k"));
ThreadPool.Info info = new ThreadPool.Info("foo", threadPoolType, 1, 10, TimeValue.timeValueMillis(3000), SizeValue.parseSizeValue("10k"));
output.setVersion(Version.CURRENT);
info.writeTo(output);
@ -58,7 +68,7 @@ public class ThreadPoolSerializationTests extends ESTestCase {
}
public void testThatNegativeQueueSizesCanBeSerialized() throws Exception {
ThreadPool.Info info = new ThreadPool.Info("foo", "search", 1, 10, TimeValue.timeValueMillis(3000), null);
ThreadPool.Info info = new ThreadPool.Info("foo", threadPoolType, 1, 10, TimeValue.timeValueMillis(3000), null);
output.setVersion(Version.CURRENT);
info.writeTo(output);
@ -70,7 +80,7 @@ public class ThreadPoolSerializationTests extends ESTestCase {
}
public void testThatToXContentWritesOutUnboundedCorrectly() throws Exception {
ThreadPool.Info info = new ThreadPool.Info("foo", "search", 1, 10, TimeValue.timeValueMillis(3000), null);
ThreadPool.Info info = new ThreadPool.Info("foo", threadPoolType, 1, 10, TimeValue.timeValueMillis(3000), null);
XContentBuilder builder = jsonBuilder();
builder.startObject();
info.toXContent(builder, ToXContent.EMPTY_PARAMS);
@ -95,7 +105,7 @@ public class ThreadPoolSerializationTests extends ESTestCase {
}
public void testThatToXContentWritesInteger() throws Exception {
ThreadPool.Info info = new ThreadPool.Info("foo", "search", 1, 10, TimeValue.timeValueMillis(3000), SizeValue.parseSizeValue("1k"));
ThreadPool.Info info = new ThreadPool.Info("foo", threadPoolType, 1, 10, TimeValue.timeValueMillis(3000), SizeValue.parseSizeValue("1k"));
XContentBuilder builder = jsonBuilder();
builder.startObject();
info.toXContent(builder, ToXContent.EMPTY_PARAMS);
@ -111,4 +121,16 @@ public class ThreadPoolSerializationTests extends ESTestCase {
assertThat(map, hasKey("queue_size"));
assertThat(map.get("queue_size").toString(), is("1000"));
}
public void testThatThreadPoolTypeIsSerializedCorrectly() throws IOException {
ThreadPool.Info info = new ThreadPool.Info("foo", threadPoolType);
output.setVersion(Version.CURRENT);
info.writeTo(output);
StreamInput input = StreamInput.wrap(output.bytes());
ThreadPool.Info newInfo = new ThreadPool.Info();
newInfo.readFrom(input);
assertThat(newInfo.getThreadPoolType(), is(threadPoolType));
}
}

View File

@ -0,0 +1,73 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.threadpool;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.settings.Validator;
import org.elasticsearch.test.ESTestCase;
import org.junit.Before;
import java.util.*;
import static org.junit.Assert.*;
public class ThreadPoolTypeSettingsValidatorTests extends ESTestCase {
private Validator validator;
@Before
public void setUp() throws Exception {
super.setUp();
validator = ThreadPool.THREAD_POOL_TYPE_SETTINGS_VALIDATOR;
}
public void testValidThreadPoolTypeSettings() {
for (Map.Entry<String, ThreadPool.ThreadPoolType> entry : ThreadPool.THREAD_POOL_TYPES.entrySet()) {
assertNull(validateSetting(validator, entry.getKey(), entry.getValue().getType()));
}
}
public void testInvalidThreadPoolTypeSettings() {
for (Map.Entry<String, ThreadPool.ThreadPoolType> entry : ThreadPool.THREAD_POOL_TYPES.entrySet()) {
Set<ThreadPool.ThreadPoolType> set = new HashSet<>();
set.addAll(Arrays.asList(ThreadPool.ThreadPoolType.values()));
set.remove(entry.getValue());
ThreadPool.ThreadPoolType invalidThreadPoolType = randomFrom(set.toArray(new ThreadPool.ThreadPoolType[set.size()]));
String expectedMessage = String.format(
Locale.ROOT,
"thread pool type for [%s] can only be updated to [%s] but was [%s]",
entry.getKey(),
entry.getValue().getType(),
invalidThreadPoolType.getType());
String message = validateSetting(validator, entry.getKey(), invalidThreadPoolType.getType());
assertNotNull(message);
assertEquals(expectedMessage, message);
}
}
public void testNonThreadPoolTypeSetting() {
String setting = ThreadPool.THREADPOOL_GROUP + randomAsciiOfLength(10) + "foo";
String value = randomAsciiOfLength(10);
assertNull(validator.validate(setting, value, ClusterState.PROTO));
}
private String validateSetting(Validator validator, String threadPoolName, String value) {
return validator.validate(ThreadPool.THREADPOOL_GROUP + threadPoolName + ".type", value, ClusterState.PROTO);
}
}

View File

@ -25,188 +25,237 @@ import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.ThreadPool.Names;
import java.lang.reflect.Field;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executor;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.nullValue;
import static org.hamcrest.Matchers.sameInstance;
import static org.hamcrest.Matchers.*;
/**
*/
public class UpdateThreadPoolSettingsTests extends ESTestCase {
private ThreadPool.Info info(ThreadPool threadPool, String name) {
for (ThreadPool.Info info : threadPool.info()) {
if (info.getName().equals(name)) {
return info;
public void testCorrectThreadPoolTypePermittedInSettings() throws InterruptedException {
String threadPoolName = randomThreadPoolName();
ThreadPool.ThreadPoolType correctThreadPoolType = ThreadPool.THREAD_POOL_TYPES.get(threadPoolName);
ThreadPool threadPool = null;
try {
threadPool = new ThreadPool(settingsBuilder()
.put("name", "testCorrectThreadPoolTypePermittedInSettings")
.put("threadpool." + threadPoolName + ".type", correctThreadPoolType.getType())
.build());
ThreadPool.Info info = info(threadPool, threadPoolName);
if (ThreadPool.Names.SAME.equals(threadPoolName)) {
assertNull(info); // we don't report on the "same" threadpool
} else {
// otherwise check we have the expected type
assertEquals(info.getThreadPoolType(), correctThreadPoolType);
}
} finally {
terminateThreadPoolIfNeeded(threadPool);
}
}
return null;
public void testThreadPoolCanNotOverrideThreadPoolType() throws InterruptedException {
String threadPoolName = randomThreadPoolName();
ThreadPool.ThreadPoolType incorrectThreadPoolType = randomIncorrectThreadPoolType(threadPoolName);
ThreadPool.ThreadPoolType correctThreadPoolType = ThreadPool.THREAD_POOL_TYPES.get(threadPoolName);
ThreadPool threadPool = null;
try {
threadPool = new ThreadPool(
settingsBuilder()
.put("name", "testThreadPoolCanNotOverrideThreadPoolType")
.put("threadpool." + threadPoolName + ".type", incorrectThreadPoolType.getType())
.build());
terminate(threadPool);
fail("expected IllegalArgumentException");
} catch (IllegalArgumentException e) {
assertThat(
e.getMessage(),
is("setting threadpool." + threadPoolName + ".type to " + incorrectThreadPoolType.getType() + " is not permitted; must be " + correctThreadPoolType.getType()));
} finally {
terminateThreadPoolIfNeeded(threadPool);
}
}
public void testUpdateSettingsCanNotChangeThreadPoolType() throws InterruptedException {
String threadPoolName = randomThreadPoolName();
ThreadPool.ThreadPoolType invalidThreadPoolType = randomIncorrectThreadPoolType(threadPoolName);
ThreadPool.ThreadPoolType validThreadPoolType = ThreadPool.THREAD_POOL_TYPES.get(threadPoolName);
ThreadPool threadPool = null;
try {
threadPool = new ThreadPool(settingsBuilder().put("name", "testUpdateSettingsCanNotChangeThreadPoolType").build());
threadPool.updateSettings(
settingsBuilder()
.put("threadpool." + threadPoolName + ".type", invalidThreadPoolType.getType())
.build()
);
fail("expected IllegalArgumentException");
} catch (IllegalArgumentException e) {
assertThat(
e.getMessage(),
is("setting threadpool." + threadPoolName + ".type to " + invalidThreadPoolType.getType() + " is not permitted; must be " + validThreadPoolType.getType()));
} finally {
terminateThreadPoolIfNeeded(threadPool);
}
}
public void testCachedExecutorType() throws InterruptedException {
ThreadPool threadPool = new ThreadPool(
String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.CACHED);
ThreadPool threadPool = null;
try {
threadPool = new ThreadPool(
Settings.settingsBuilder()
.put("threadpool.search.type", "cached")
.put("name","testCachedExecutorType").build());
.put("name", "testCachedExecutorType").build());
assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("cached"));
assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(5L));
assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.CACHED);
assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class));
// Replace with different type
threadPool.updateSettings(settingsBuilder().put("threadpool.search.type", "same").build());
assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("same"));
assertThat(threadPool.executor(Names.SEARCH), is(ThreadPool.DIRECT_EXECUTOR));
// Replace with different type again
threadPool.updateSettings(settingsBuilder()
.put("threadpool.search.type", "scaling")
.put("threadpool.search.keep_alive", "10m")
.put("threadpool." + threadPoolName + ".keep_alive", "10m")
.build());
assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("scaling"));
assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getCorePoolSize(), equalTo(1));
assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.CACHED);
assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class));
assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getCorePoolSize(), equalTo(0));
// Make sure keep alive value changed
assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(10L));
assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(10L));
assertThat(info(threadPool, threadPoolName).getKeepAlive().minutes(), equalTo(10L));
assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(10L));
// Put old type back
threadPool.updateSettings(settingsBuilder().put("threadpool.search.type", "cached").build());
assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("cached"));
// Make sure keep alive value reused
assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(10L));
assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
assertThat(info(threadPool, threadPoolName).getKeepAlive().minutes(), equalTo(10L));
assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class));
// Change keep alive
Executor oldExecutor = threadPool.executor(Names.SEARCH);
threadPool.updateSettings(settingsBuilder().put("threadpool.search.keep_alive", "1m").build());
Executor oldExecutor = threadPool.executor(threadPoolName);
threadPool.updateSettings(settingsBuilder().put("threadpool." + threadPoolName + ".keep_alive", "1m").build());
// Make sure keep alive value changed
assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(1L));
assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(1L));
assertThat(info(threadPool, threadPoolName).getKeepAlive().minutes(), equalTo(1L));
assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(1L));
// Make sure executor didn't change
assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("cached"));
assertThat(threadPool.executor(Names.SEARCH), sameInstance(oldExecutor));
assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.CACHED);
assertThat(threadPool.executor(threadPoolName), sameInstance(oldExecutor));
// Set the same keep alive
threadPool.updateSettings(settingsBuilder().put("threadpool.search.keep_alive", "1m").build());
threadPool.updateSettings(settingsBuilder().put("threadpool." + threadPoolName + ".keep_alive", "1m").build());
// Make sure keep alive value didn't change
assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(1L));
assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(1L));
assertThat(info(threadPool, threadPoolName).getKeepAlive().minutes(), equalTo(1L));
assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(1L));
// Make sure executor didn't change
assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("cached"));
assertThat(threadPool.executor(Names.SEARCH), sameInstance(oldExecutor));
terminate(threadPool);
assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.CACHED);
assertThat(threadPool.executor(threadPoolName), sameInstance(oldExecutor));
} finally {
terminateThreadPoolIfNeeded(threadPool);
}
}
public void testFixedExecutorType() throws InterruptedException {
ThreadPool threadPool = new ThreadPool(settingsBuilder()
.put("threadpool.search.type", "fixed")
.put("name","testCachedExecutorType").build());
String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.FIXED);
ThreadPool threadPool = null;
assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
try {
threadPool = new ThreadPool(settingsBuilder()
.put("name", "testCachedExecutorType").build());
assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class));
// Replace with different type
threadPool.updateSettings(settingsBuilder()
.put("threadpool.search.type", "scaling")
.put("threadpool.search.keep_alive", "10m")
.put("threadpool.search.min", "2")
.put("threadpool.search.size", "15")
.put("threadpool." + threadPoolName + ".size", "15")
.build());
assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("scaling"));
assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getCorePoolSize(), equalTo(2));
assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getMaximumPoolSize(), equalTo(15));
assertThat(info(threadPool, Names.SEARCH).getMin(), equalTo(2));
assertThat(info(threadPool, Names.SEARCH).getMax(), equalTo(15));
// Make sure keep alive value changed
assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(10L));
assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(10L));
assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.FIXED);
assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class));
assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getCorePoolSize(), equalTo(15));
assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getMaximumPoolSize(), equalTo(15));
assertThat(info(threadPool, threadPoolName).getMin(), equalTo(15));
assertThat(info(threadPool, threadPoolName).getMax(), equalTo(15));
// keep alive does not apply to fixed thread pools
assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(0L));
// Update with empty settings; the pool must keep its type and size
threadPool.updateSettings(Settings.EMPTY);
assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.FIXED);
// Make sure keep alive value is not used
assertThat(info(threadPool, threadPoolName).getKeepAlive(), nullValue());
// Make sure the pool size values were reused
assertThat(info(threadPool, threadPoolName).getMin(), equalTo(15));
assertThat(info(threadPool, threadPoolName).getMax(), equalTo(15));
assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class));
assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getCorePoolSize(), equalTo(15));
assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getMaximumPoolSize(), equalTo(15));
// Change size
Executor oldExecutor = threadPool.executor(threadPoolName);
threadPool.updateSettings(settingsBuilder().put("threadpool." + threadPoolName + ".size", "10").build());
// Make sure size values changed
assertThat(info(threadPool, threadPoolName).getMax(), equalTo(10));
assertThat(info(threadPool, threadPoolName).getMin(), equalTo(10));
assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getMaximumPoolSize(), equalTo(10));
assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getCorePoolSize(), equalTo(10));
// Make sure executor didn't change
assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("fixed"));
assertThat(threadPool.executor(Names.SEARCH), sameInstance(oldExecutor));
assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.FIXED);
assertThat(threadPool.executor(threadPoolName), sameInstance(oldExecutor));
// Change queue capacity
threadPool.updateSettings(settingsBuilder()
.put("threadpool.search.queue", "500")
.put("threadpool." + threadPoolName + ".queue", "500")
.build());
} finally {
terminateThreadPoolIfNeeded(threadPool);
}
}
public void testScalingExecutorType() throws InterruptedException {
String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.SCALING);
ThreadPool threadPool = null;
try {
threadPool = new ThreadPool(settingsBuilder()
.put("threadpool." + threadPoolName + ".size", 10)
.put("name", "testCachedExecutorType").build());
assertThat(info(threadPool, threadPoolName).getMin(), equalTo(1));
assertThat(info(threadPool, threadPoolName).getMax(), equalTo(10));
assertThat(info(threadPool, threadPoolName).getKeepAlive().minutes(), equalTo(5L));
assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.SCALING);
assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class));
// Change settings that don't require pool replacement
Executor oldExecutor = threadPool.executor(threadPoolName);
threadPool.updateSettings(settingsBuilder()
.put("threadpool.search.type", "scaling")
.put("threadpool.search.keep_alive", "10m")
.put("threadpool.search.min", "2")
.put("threadpool.search.size", "15")
.put("threadpool." + threadPoolName + ".keep_alive", "10m")
.put("threadpool." + threadPoolName + ".min", "2")
.put("threadpool." + threadPoolName + ".size", "15")
.build());
assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("scaling"));
assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getCorePoolSize(), equalTo(2));
assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getMaximumPoolSize(), equalTo(15));
assertThat(info(threadPool, Names.SEARCH).getMin(), equalTo(2));
assertThat(info(threadPool, Names.SEARCH).getMax(), equalTo(15));
assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.SCALING);
assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class));
assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getCorePoolSize(), equalTo(2));
assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getMaximumPoolSize(), equalTo(15));
assertThat(info(threadPool, threadPoolName).getMin(), equalTo(2));
assertThat(info(threadPool, threadPoolName).getMax(), equalTo(15));
// Make sure keep alive value changed
assertThat(info(threadPool, threadPoolName).getKeepAlive().minutes(), equalTo(10L));
assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(10L));
assertThat(threadPool.executor(threadPoolName), sameInstance(oldExecutor));
} finally {
terminateThreadPoolIfNeeded(threadPool);
}
}
public void testShutdownNowInterrupts() throws Exception {
String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.FIXED);
ThreadPool threadPool = null;
try {
threadPool = new ThreadPool(Settings.settingsBuilder()
.put("threadpool." + threadPoolName + ".queue_size", 1000)
.put("name", "testCachedExecutorType").build());
assertEquals(info(threadPool, threadPoolName).getQueueSize().getSingles(), 1000L);
final CountDownLatch latch = new CountDownLatch(1);
ThreadPoolExecutor oldExecutor = (ThreadPoolExecutor) threadPool.executor(threadPoolName);
threadPool.executor(threadPoolName).execute(() -> {
try {
new CountDownLatch(1).await();
} catch (InterruptedException ex) {
@ -214,35 +263,39 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase {
Thread.currentThread().interrupt();
}
}
);
threadPool.updateSettings(settingsBuilder().put("threadpool." + threadPoolName + ".queue_size", 2000).build());
assertThat(threadPool.executor(threadPoolName), not(sameInstance(oldExecutor)));
assertThat(oldExecutor.isShutdown(), equalTo(true));
assertThat(oldExecutor.isTerminating(), equalTo(true));
assertThat(oldExecutor.isTerminated(), equalTo(false));
threadPool.shutdownNow(); // should interrupt the thread
latch.await(3, TimeUnit.SECONDS); // If this throws then ThreadPool#shutdownNow didn't interrupt
} finally {
terminateThreadPoolIfNeeded(threadPool);
}
}
public void testCustomThreadPool() throws Exception {
ThreadPool threadPool = null;
try {
threadPool = new ThreadPool(Settings.settingsBuilder()
.put("threadpool.my_pool1.type", "scaling")
.put("threadpool.my_pool2.type", "fixed")
.put("threadpool.my_pool2.size", "1")
.put("threadpool.my_pool2.queue_size", "1")
.put("name", "testCustomThreadPool").build());
ThreadPoolInfo groups = threadPool.info();
boolean foundPool1 = false;
boolean foundPool2 = false;
outer:
for (ThreadPool.Info info : groups) {
if ("my_pool1".equals(info.getName())) {
foundPool1 = true;
assertThat(info.getType(), equalTo("cached"));
assertEquals(info.getThreadPoolType(), ThreadPool.ThreadPoolType.SCALING);
} else if ("my_pool2".equals(info.getName())) {
foundPool2 = true;
assertThat(info.getType(), equalTo("fixed"));
assertEquals(info.getThreadPoolType(), ThreadPool.ThreadPoolType.FIXED);
assertThat(info.getMin(), equalTo(1));
assertThat(info.getMax(), equalTo(1));
assertThat(info.getQueueSize().singles(), equalTo(1L));
@ -268,16 +321,17 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase {
groups = threadPool.info();
foundPool1 = false;
foundPool2 = false;
outer:
for (ThreadPool.Info info : groups) {
if ("my_pool1".equals(info.getName())) {
foundPool1 = true;
assertThat(info.getType(), equalTo("cached"));
assertEquals(info.getThreadPoolType(), ThreadPool.ThreadPoolType.SCALING);
} else if ("my_pool2".equals(info.getName())) {
foundPool2 = true;
assertThat(info.getMax(), equalTo(10));
assertThat(info.getMin(), equalTo(10));
assertThat(info.getQueueSize().singles(), equalTo(1L));
assertThat(info.getType(), equalTo("fixed"));
assertEquals(info.getThreadPoolType(), ThreadPool.ThreadPoolType.FIXED);
} else {
for (Field field : Names.class.getFields()) {
if (info.getName().equalsIgnoreCase(field.getName())) {
@ -290,7 +344,40 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase {
}
assertThat(foundPool1, is(true));
assertThat(foundPool2, is(true));
} finally {
terminateThreadPoolIfNeeded(threadPool);
}
}
private void terminateThreadPoolIfNeeded(ThreadPool threadPool) throws InterruptedException {
if (threadPool != null) {
terminate(threadPool);
}
}
private ThreadPool.Info info(ThreadPool threadPool, String name) {
for (ThreadPool.Info info : threadPool.info()) {
if (info.getName().equals(name)) {
return info;
}
}
return null;
}
private String randomThreadPoolName() {
Set<String> threadPoolNames = ThreadPool.THREAD_POOL_TYPES.keySet();
return randomFrom(threadPoolNames.toArray(new String[threadPoolNames.size()]));
}
private ThreadPool.ThreadPoolType randomIncorrectThreadPoolType(String threadPoolName) {
Set<ThreadPool.ThreadPoolType> set = new HashSet<>(Arrays.asList(ThreadPool.ThreadPoolType.values()));
set.remove(ThreadPool.THREAD_POOL_TYPES.get(threadPoolName));
return randomFrom(set.toArray(new ThreadPool.ThreadPoolType[set.size()]));
}
private String randomThreadPool(ThreadPool.ThreadPoolType type) {
return randomFrom(ThreadPool.THREAD_POOL_TYPES.entrySet().stream().filter(t -> t.getValue().equals(type)).map(t -> t.getKey()).collect(Collectors.toList()));
}
}

View File

@ -112,7 +112,7 @@ public class KeyedLockTests extends ESTestCase {
try {
startLatch.await();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
int numRuns = scaledRandomIntBetween(5000, 50000);
for (int i = 0; i < numRuns; i++) {

View File

@ -112,7 +112,7 @@ directory in the root of the plugin should be served.
=== Testing your plugin
When testing a Java plugin, it will only be auto-loaded if it is in the
`plugins/` directory. Use `bin/plugin install file:///path/to/your/plugin`
to install your plugin for testing.
You may also load your plugin within the test framework for integration tests.

View File

@ -165,6 +165,11 @@ The following are a list of settings (prefixed with `discovery.ec2`) that can fu
Defaults to `3s`. If no unit like `ms`, `s` or `m` is specified,
milliseconds are used.
`node_cache_time`::
How long the list of hosts is cached to prevent further requests to the AWS API.
Defaults to `10s`.
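A hedged sketch of how this value is consumed (the class name below is hypothetical; the setting key and the `10s` default mirror `AwsEc2UnicastHostsProvider` elsewhere in this commit):
[source,java]
--------------------------------------------------
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;

// Hypothetical illustration, not part of the plugin itself.
public class NodeCacheTimeSketch {
    public static void main(String[] args) {
        Settings settings = Settings.settingsBuilder()
                .put("discovery.ec2.node_cache_time", "30s") // override the 10s default
                .build();
        TimeValue cacheTime = settings.getAsTime(
                "discovery.ec2.node_cache_time", TimeValue.timeValueMillis(10_000L));
        System.out.println(cacheTime); // prints: 30s
    }
}
--------------------------------------------------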
[IMPORTANT]
.Binding the network host
@ -195,7 +200,6 @@ as valid network host settings:
|`_ec2_` |equivalent to _ec2:privateIpv4_.
|==================================================================
[[discovery-ec2-permissions]]
===== Recommended EC2 Permissions

View File

@ -101,7 +101,7 @@ For instance, to install a plugin from your local file system, you could run:
[source,shell]
-----------------------------------
sudo bin/plugin install file:///path/to/plugin.zip
-----------------------------------
[[listing-removing]]

View File

@ -117,6 +117,10 @@ Removed support for multiple highlighter names, the only supported ones are: `pl
Removed support for the deprecated top level `filter` in the search api, replaced by `post_filter`.
==== `query_binary` and `filter_binary` removed
Removed support for the undocumented `query_binary` and `filter_binary` sections of a search request.
=== Parent/Child changes
The `children` aggregation, parent child inner hits and `has_child` and `has_parent` queries will not work on indices
@ -390,3 +394,11 @@ request cache and the field data cache.
This setting would arbitrarily pick the first interface not marked as loopback. Instead, specify by address
scope (e.g. `_local_,_site_` for all loopback and private network addresses) or by explicit interface names,
hostnames, or addresses.
=== Forbid changing of thread pool types
Previously, <<modules-threadpool,thread pool types>> could be dynamically adjusted. The thread pool type effectively
controls the backing queue for the thread pool and modifying this is an expert setting with minimal practical benefits
and high risk of being misused. The ability to change the thread pool type for any thread pool has been removed; do note
that it is still possible to adjust relevant thread pool parameters for each of the thread pools (e.g., depending on
the thread pool type, `keep_alive`, `queue_size`, etc.).
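As an illustration, a minimal sketch modeled on the updated tests in this commit (the class name is hypothetical; `ThreadPool` and `settingsBuilder` are the ones used throughout this change):
[source,java]
--------------------------------------------------
import static org.elasticsearch.common.settings.Settings.settingsBuilder;

import org.elasticsearch.threadpool.ThreadPool;

// Hypothetical sketch: tuning parameters still works, changing the type does not.
public class ThreadPoolTuningSketch {
    public static void main(String[] args) {
        ThreadPool threadPool = new ThreadPool(settingsBuilder().put("name", "sketch").build());
        // Still allowed: adjust a parameter of an existing pool at runtime.
        threadPool.updateSettings(settingsBuilder()
                .put("threadpool.search.queue_size", 2000)
                .build());
        // No longer allowed: a type change such as
        // .put("threadpool.search.type", "scaling") is now rejected.
        threadPool.shutdownNow();
    }
}
--------------------------------------------------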

View File

@ -9,87 +9,92 @@ of discarded.
There are several thread pools, but the important ones include:
`generic`::
For generic operations (e.g., background node discovery).
Thread pool type is `cached`.
`index`::
For index/delete operations. Thread pool type is `fixed`
with a size of `# of available processors`,
queue_size of `200`.
`search`::
For count/search operations. Thread pool type is `fixed`
with a size of `int((# of available_processors * 3) / 2) + 1`,
queue_size of `1000`.
`suggest`::
For suggest operations. Thread pool type is `fixed`
with a size of `# of available processors`,
queue_size of `1000`.
`get`::
For get operations. Thread pool type is `fixed`
with a size of `# of available processors`,
queue_size of `1000`.
`bulk`::
For bulk operations. Thread pool type is `fixed`
with a size of `# of available processors`,
queue_size of `50`.
`percolate`::
For percolate operations. Thread pool type is `fixed`
with a size of `# of available processors`,
queue_size of `1000`.
`snapshot`::
For snapshot/restore operations. Thread pool type is `scaling` with a
keep-alive of `5m` and a size of `min(5, (# of available processors)/2)`.
`warmer`::
For segment warm-up operations. Thread pool type is `scaling` with a
keep-alive of `5m` and a size of `min(5, (# of available processors)/2)`.
`refresh`::
For refresh operations. Thread pool type is `scaling` with a
keep-alive of `5m` and a size of `min(10, (# of available processors)/2)`.
`listener`::
Mainly for the Java client, executing actions when listener threading is enabled.
Thread pool type is `scaling` with a default size of `min(10, (# of available processors)/2)`.
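The size formulas above are easier to follow with a concrete processor count; a small worked example (the numbers assume a node with 8 available processors and are illustrative only):
[source,java]
--------------------------------------------------
public class DefaultPoolSizes {
    public static void main(String[] args) {
        int processors = 8;                          // assumed for this example
        int search = ((processors * 3) / 2) + 1;     // 13 threads, queue_size 1000
        int bulk = processors;                       // 8 threads, queue_size 50
        int warmer = Math.min(5, processors / 2);    // 4 threads, keep-alive 5m
        int refresh = Math.min(10, processors / 2);  // 4 threads, keep-alive 5m
        System.out.printf("search=%d bulk=%d warmer=%d refresh=%d%n", search, bulk, warmer, refresh);
    }
}
--------------------------------------------------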
Changing a specific thread pool can be done by setting its type-specific parameters; for example, changing the `index`
thread pool to have more threads:
[source,js]
--------------------------------------------------
threadpool:
index:
size: 30
--------------------------------------------------
NOTE: you can update thread pool settings dynamically using <<cluster-update-settings>>.
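For instance, a hedged sketch of such an update through the Java API (a fragment rather than a complete program: it assumes an already-connected `client` and the 2.x cluster update settings request builder):
[source,java]
--------------------------------------------------
// Assumes `client` is a connected org.elasticsearch.client.Client.
client.admin().cluster().prepareUpdateSettings()
        .setTransientSettings(Settings.settingsBuilder()
                .put("threadpool.index.size", 30)
                .build())
        .get();
--------------------------------------------------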
[float]
[[types]]
=== Thread pool types
The following are the types of thread pools and their respective parameters:
[float]
==== `cached`
The `cached` thread pool is an unbounded thread pool that will spawn a
thread if there are pending requests. This thread pool is used to
prevent requests submitted to this pool from blocking or being
rejected. Unused threads in this thread pool will be terminated after
a keep alive expires (defaults to five minutes). The `cached` thread
pool is reserved for the <<modules-threadpool,`generic`>> thread pool.
The `keep_alive` parameter determines how long a thread should be kept
around in the thread pool without doing any work.
[source,js]
--------------------------------------------------
threadpool:
generic:
keep_alive: 2m
--------------------------------------------------
[float]
@ -111,7 +116,6 @@ full, it will abort the request.
--------------------------------------------------
threadpool:
index:
size: 30
queue_size: 1000
--------------------------------------------------
@ -130,7 +134,6 @@ around in the thread pool without it doing any work.
--------------------------------------------------
threadpool:
warmer:
size: 8
keep_alive: 2m
--------------------------------------------------

View File

@ -49,6 +49,7 @@ public interface AwsEc2Service extends LifecycleComponent<AwsEc2Service> {
public static final String GROUPS = "discovery.ec2.groups";
public static final String TAG_PREFIX = "discovery.ec2.tag.";
public static final String AVAILABILITY_ZONES = "discovery.ec2.availability_zones";
public static final String NODE_CACHE_TIME = "discovery.ec2.node_cache_time";
}
AmazonEC2 client();

View File

@ -19,10 +19,13 @@
package org.elasticsearch.cloud.aws;
import com.amazonaws.AmazonClientException;
import com.amazonaws.AmazonWebServiceRequest;
import com.amazonaws.ClientConfiguration;
import com.amazonaws.Protocol;
import com.amazonaws.auth.*;
import com.amazonaws.internal.StaticCredentialsProvider;
import com.amazonaws.retry.RetryPolicy;
import com.amazonaws.services.ec2.AmazonEC2;
import com.amazonaws.services.ec2.AmazonEC2Client;
import org.elasticsearch.ElasticsearchException;
@ -36,6 +39,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter;
import java.util.Locale;
import java.util.Random;
/**
*
@ -103,6 +107,24 @@ public class AwsEc2ServiceImpl extends AbstractLifecycleComponent<AwsEc2Service>
}
}
// Increase the number of retries in case of 5xx API responses
final Random rand = new Random();
RetryPolicy retryPolicy = new RetryPolicy(
RetryPolicy.RetryCondition.NO_RETRY_CONDITION,
new RetryPolicy.BackoffStrategy() {
@Override
public long delayBeforeNextRetry(AmazonWebServiceRequest originalRequest,
AmazonClientException exception,
int retriesAttempted) {
// with 10 retries the max delay time is 320s/320000ms (10 * 2^5 * 1 * 1000)
logger.warn("EC2 API request failed, retry again. Reason was:", exception);
return 1000L * (long) (10d * Math.pow(2, ((double) retriesAttempted) / 2.0d) * (1.0d + rand.nextDouble()));
}
},
10,
false);
clientConfiguration.setRetryPolicy(retryPolicy);
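// Worked example of the backoff above (rand is uniform in [0, 1)):
// retriesAttempted = 4 -> base delay 1000 * 10 * 2^(4/2) = 40_000 ms, scaled into [40s, 80s);
// retriesAttempted = 10 -> base delay 10 * 2^5 * 1000 = 320_000 ms, matching the comment above.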
AWSCredentialsProvider credentials;
if (account == null && key == null) {
@ -134,6 +156,8 @@ public class AwsEc2ServiceImpl extends AbstractLifecycleComponent<AwsEc2Service>
endpoint = "ec2.us-west-2.amazonaws.com";
} else if (region.equals("ap-southeast") || region.equals("ap-southeast-1")) {
endpoint = "ec2.ap-southeast-1.amazonaws.com";
} else if (region.equals("us-gov-west") || region.equals("us-gov-west-1")) {
endpoint = "ec2.us-gov-west-1.amazonaws.com";
} else if (region.equals("ap-southeast-2")) {
endpoint = "ec2.ap-southeast-2.amazonaws.com";
} else if (region.equals("ap-northeast") || region.equals("ap-northeast-1")) {

View File

@ -31,6 +31,8 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.SingleObjectCache;
import org.elasticsearch.discovery.zen.ping.unicast.UnicastHostsProvider;
import org.elasticsearch.transport.TransportService;
@ -64,6 +66,8 @@ public class AwsEc2UnicastHostsProvider extends AbstractComponent implements Uni
private final HostType hostType;
private final DiscoNodesCache discoNodes;
@Inject
public AwsEc2UnicastHostsProvider(Settings settings, TransportService transportService, AwsEc2Service awsEc2Service, Version version) {
super(settings);
@ -74,6 +78,9 @@ public class AwsEc2UnicastHostsProvider extends AbstractComponent implements Uni
this.hostType = HostType.valueOf(settings.get(DISCOVERY_EC2.HOST_TYPE, "private_ip")
.toUpperCase(Locale.ROOT));
this.discoNodes = new DiscoNodesCache(this.settings.getAsTime(DISCOVERY_EC2.NODE_CACHE_TIME,
TimeValue.timeValueMillis(10_000L)));
this.bindAnyGroup = settings.getAsBoolean(DISCOVERY_EC2.ANY_GROUP, true);
this.groups = new HashSet<>();
groups.addAll(Arrays.asList(settings.getAsArray(DISCOVERY_EC2.GROUPS)));
@ -94,6 +101,11 @@ public class AwsEc2UnicastHostsProvider extends AbstractComponent implements Uni
@Override
public List<DiscoveryNode> buildDynamicNodes() {
return discoNodes.getOrRefresh();
}
protected List<DiscoveryNode> fetchDynamicNodes() {
List<DiscoveryNode> discoNodes = new ArrayList<>();
DescribeInstancesResult descInstances;
@ -199,4 +211,25 @@ public class AwsEc2UnicastHostsProvider extends AbstractComponent implements Uni
return describeInstancesRequest;
}
private final class DiscoNodesCache extends SingleObjectCache<List<DiscoveryNode>> {
private boolean empty = true;
protected DiscoNodesCache(TimeValue refreshInterval) {
super(refreshInterval, new ArrayList<>());
}
@Override
protected boolean needsRefresh() {
return (empty || super.needsRefresh());
}
@Override
protected List<DiscoveryNode> refresh() {
List<DiscoveryNode> nodes = fetchDynamicNodes();
empty = nodes.isEmpty();
return nodes;
}
}
}
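The caching pattern above is generic; a minimal standalone sketch of the same SingleObjectCache contract (the class and expensiveLookup() below are hypothetical stand-ins for the AWS call):
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.SingleObjectCache;
public class ExpensiveValueCache extends SingleObjectCache<String> {
private boolean empty = true;
ExpensiveValueCache(TimeValue refreshInterval) {
super(refreshInterval, ""); // start with a placeholder value
}
@Override
protected boolean needsRefresh() {
// refresh immediately while only the placeholder is cached
return empty || super.needsRefresh();
}
@Override
protected String refresh() {
String value = expensiveLookup(); // stand-in for the describe-instances call
empty = value.isEmpty();
return value;
}
private String expensiveLookup() {
return "value";
}
}
Callers just invoke getOrRefresh(), exactly as buildDynamicNodes() does above.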

View File

@ -32,6 +32,7 @@ import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.transport.MockTransportService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.local.LocalTransport;
import org.junit.AfterClass;
import org.junit.Before;
@ -231,4 +232,47 @@ public class Ec2DiscoveryTests extends ESTestCase {
assertThat(discoveryNodes, hasSize(prodInstances));
}
abstract class DummyEc2HostProvider extends AwsEc2UnicastHostsProvider {
public int fetchCount = 0;
public DummyEc2HostProvider(Settings settings, TransportService transportService, AwsEc2Service service, Version version) {
super(settings, transportService, service, version);
}
}
public void testGetNodeListEmptyCache() throws Exception {
AwsEc2Service awsEc2Service = new AwsEc2ServiceMock(Settings.EMPTY, 1, null);
DummyEc2HostProvider provider = new DummyEc2HostProvider(Settings.EMPTY, transportService, awsEc2Service, Version.CURRENT) {
@Override
protected List<DiscoveryNode> fetchDynamicNodes() {
fetchCount++;
return new ArrayList<>();
}
};
for (int i=0; i<3; i++) {
provider.buildDynamicNodes();
}
assertThat(provider.fetchCount, is(3));
}
public void testGetNodeListCached() throws Exception {
Settings.Builder builder = Settings.settingsBuilder()
.put(DISCOVERY_EC2.NODE_CACHE_TIME, "500ms");
AwsEc2Service awsEc2Service = new AwsEc2ServiceMock(Settings.EMPTY, 1, null);
DummyEc2HostProvider provider = new DummyEc2HostProvider(builder.build(), transportService, awsEc2Service, Version.CURRENT) {
@Override
protected List<DiscoveryNode> fetchDynamicNodes() {
fetchCount++;
return Ec2DiscoveryTests.this.buildDynamicNodes(Settings.EMPTY, 1);
}
};
for (int i=0; i<3; i++) {
provider.buildDynamicNodes();
}
assertThat(provider.fetchCount, is(1));
Thread.sleep(1_000L); // wait for cache to expire
for (int i=0; i<3; i++) {
provider.buildDynamicNodes();
}
assertThat(provider.fetchCount, is(2));
}
}

View File

@ -23,7 +23,7 @@ esplugin {
}
dependencies {
compile 'org.mozilla:rhino:1.7.7'
}
compileJava.options.compilerArgs << "-Xlint:-rawtypes,-unchecked"

View File

@ -0,0 +1 @@
3a9ea863b86126b0ed8f2fe2230412747cd3c254

View File

@ -1 +0,0 @@
e982f2136574b9a423186fbaeaaa98dc3e5a5288

View File

@ -28,6 +28,11 @@ import org.elasticsearch.script.javascript.JavaScriptScriptEngineService;
*/
public class JavaScriptPlugin extends Plugin {
static {
// install rhino policy on plugin init
JavaScriptScriptEngineService.init();
}
@Override
public String name() {
return "lang-javascript";

View File

@ -58,6 +58,34 @@ public class JavaScriptScriptEngineService extends AbstractComponent implements
private Scriptable globalScope;
// one time initialization of rhino security manager integration
private static final CodeSource DOMAIN;
static {
try {
DOMAIN = new CodeSource(new URL("file:" + BootstrapInfo.UNTRUSTED_CODEBASE), (Certificate[]) null);
} catch (MalformedURLException e) {
throw new RuntimeException(e);
}
SecurityController.initGlobal(new PolicySecurityController() {
@Override
public GeneratedClassLoader createClassLoader(ClassLoader parent, Object securityDomain) {
// don't let scripts compile other scripts
SecurityManager sm = System.getSecurityManager();
if (sm != null) {
sm.checkPermission(new SpecialPermission());
}
// check the domain, this is all we allow
if (securityDomain != DOMAIN) {
throw new SecurityException("illegal securityDomain: " + securityDomain);
}
return super.createClassLoader(parent, securityDomain);
}
});
}
/** ensures this engine is initialized */
public static void init() {}
@Inject
public JavaScriptScriptEngineService(Settings settings) {
super(settings);
@ -100,21 +128,11 @@ public class JavaScriptScriptEngineService extends AbstractComponent implements
@Override
public Object compile(String script) {
// we don't know what kind of safeguards rhino has,
// but just be safe
SecurityManager sm = System.getSecurityManager();
if (sm != null) {
sm.checkPermission(new SpecialPermission());
}
Context ctx = Context.enter();
try {
ctx.setWrapFactory(wrapFactory);
ctx.setOptimizationLevel(optimizationLevel);
return ctx.compileString(script, generateScriptName(), 1, DOMAIN);
} finally {
Context.exit();
}

View File

@ -23,8 +23,6 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.script.CompiledScript;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.test.ESTestCase;
import org.mozilla.javascript.WrappedException;
import java.util.HashMap;
@ -37,14 +35,18 @@ public class JavaScriptSecurityTests extends ESTestCase {
private JavaScriptScriptEngineService se;
@Override
public void setUp() throws Exception {
super.setUp();
se = new JavaScriptScriptEngineService(Settings.Builder.EMPTY_SETTINGS);
// otherwise will exit your VM and other bad stuff
assumeTrue("test requires security manager to be enabled", System.getSecurityManager() != null);
}
@Override
public void tearDown() throws Exception {
se.close();
super.tearDown();
}
/** runs a script */
@ -86,4 +88,13 @@ public class JavaScriptSecurityTests extends ESTestCase {
// no files
assertFailure("java.io.File.createTempFile(\"test\", \"tmp\")");
}
public void testDefinitelyNotOK() {
// no mucking with security controller
assertFailure("var ctx = org.mozilla.javascript.Context.getCurrentContext(); " +
"ctx.setSecurityController(new org.mozilla.javascript.PolicySecurityController());");
// no compiling scripts from scripts
assertFailure("var ctx = org.mozilla.javascript.Context.getCurrentContext(); " +
"ctx.compileString(\"1 + 1\", \"foobar\", 1, null); ");
}
}

View File

@ -23,8 +23,6 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.script.CompiledScript;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.test.ESTestCase;
import org.python.core.PyException;
import java.util.HashMap;
@ -37,17 +35,21 @@ public class PythonSecurityTests extends ESTestCase {
private PythonScriptEngineService se;
@Override
public void setUp() throws Exception {
super.setUp();
se = new PythonScriptEngineService(Settings.Builder.EMPTY_SETTINGS);
// otherwise will exit your VM and other bad stuff
assumeTrue("test requires security manager to be enabled", System.getSecurityManager() != null);
}
@Override
public void tearDown() throws Exception {
// We need to clear some system properties
System.clearProperty("python.cachedir.skip");
System.clearProperty("python.console.encoding");
se.close();
super.tearDown();
}
/** runs a script */

View File

@ -193,6 +193,8 @@ public class InternalAwsS3Service extends AbstractLifecycleComponent<AwsS3Servic
return "s3-sa-east-1.amazonaws.com";
} else if ("cn-north".equals(region) || "cn-north-1".equals(region)) {
return "s3.cn-north-1.amazonaws.com.cn";
} else if ("us-gov-west".equals(region) || "us-gov-west-1".equals(region)) {
return "s3-us-gov-west-1.amazonaws.com";
} else {
throw new IllegalArgumentException("No automatic endpoint could be derived from region [" + region + "]");
}

View File

@ -64,10 +64,10 @@ import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.MockEngineFactoryPlugin;
import org.elasticsearch.index.engine.CommitStats;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.store.IndexStoreConfig;
import org.elasticsearch.indices.IndicesService;
@ -88,7 +88,6 @@ import org.elasticsearch.test.disruption.ServiceDisruptionScheme;
import org.elasticsearch.test.store.MockFSIndexStore;
import org.elasticsearch.test.transport.AssertingLocalTransport;
import org.elasticsearch.test.transport.MockTransportService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.netty.NettyTransport;
@ -98,20 +97,11 @@ import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.file.Path;
import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Predicate;
@ -119,15 +109,11 @@ import java.util.stream.Collectors;
import java.util.stream.Stream;
import static junit.framework.Assert.fail;
import static org.apache.lucene.util.LuceneTestCase.*;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.test.ESTestCase.assertBusy;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout;
import static org.hamcrest.Matchers.*;
import static org.junit.Assert.assertThat;
/**
@ -404,18 +390,6 @@ public final class InternalTestCluster extends TestCluster {
if (random.nextBoolean()) { // sometimes set a
builder.put(SearchService.DEFAULT_KEEPALIVE_KEY, TimeValue.timeValueSeconds(100 + random.nextInt(5 * 60)));
}
if (random.nextBoolean()) {
// change threadpool types to make sure we don't have components that rely on the type of thread pools
for (String name : Arrays.asList(ThreadPool.Names.BULK, ThreadPool.Names.FLUSH, ThreadPool.Names.GET,
ThreadPool.Names.INDEX, ThreadPool.Names.MANAGEMENT, ThreadPool.Names.FORCE_MERGE,
ThreadPool.Names.PERCOLATE, ThreadPool.Names.REFRESH, ThreadPool.Names.SEARCH, ThreadPool.Names.SNAPSHOT,
ThreadPool.Names.SUGGEST, ThreadPool.Names.WARMER)) {
if (random.nextBoolean()) {
final String type = RandomPicks.randomFrom(random, Arrays.asList("fixed", "cached", "scaling"));
builder.put(ThreadPool.THREADPOOL_GROUP + name + ".type", type);
}
}
}
if (random.nextInt(10) == 0) {
// node gets an extra cpu this time