Merge branch 'master' into dots2

commit bbdbfe7373
@@ -51,6 +51,11 @@ public class PluginBuildPlugin extends BuildPlugin {
            project.integTest.clusterConfig.plugin(name, project.bundlePlugin.outputs.files)
            project.tasks.run.clusterConfig.plugin(name, project.bundlePlugin.outputs.files)
        }

        project.namingConventions {
            // Plugins declare extensions of ESIntegTestCase as "Tests" instead of IT.
            skipIntegTestInDisguise = true
        }
    }
    createIntegTestTask(project)
    createBundleTask(project)

@@ -63,7 +68,7 @@ public class PluginBuildPlugin extends BuildPlugin {
        testCompile "org.elasticsearch.test:framework:${project.versions.elasticsearch}"
        // we "upgrade" these optional deps to provided for plugins, since they will run
        // with a full elasticsearch server that includes optional deps
        provided "com.spatial4j:spatial4j:${project.versions.spatial4j}"
        provided "org.locationtech.spatial4j:spatial4j:${project.versions.spatial4j}"
        provided "com.vividsolutions:jts:${project.versions.jts}"
        provided "log4j:log4j:${project.versions.log4j}"
        provided "log4j:apache-log4j-extras:${project.versions.log4j}"

@@ -68,11 +68,17 @@ class PluginPropertiesTask extends Copy {
    }

    Map generateSubstitutions() {
        def stringSnap = { version ->
            if (version.endsWith("-SNAPSHOT")) {
                return version.substring(0, version.length() - 9)
            }
            return version
        }
        return [
            'name': extension.name,
            'description': extension.description,
            'version': extension.version,
            'elasticsearchVersion': VersionProperties.elasticsearch,
            'version': stringSnap(extension.version),
            'elasticsearchVersion': stringSnap(VersionProperties.elasticsearch),
            'javaVersion': project.targetCompatibility as String,
            'isolated': extension.isolated as String,
            'classname': extension.classname

@@ -0,0 +1,89 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.gradle.precommit

import org.elasticsearch.gradle.LoggedExec
import org.elasticsearch.gradle.VersionProperties
import org.gradle.api.file.FileCollection
import org.gradle.api.tasks.Input
import org.gradle.api.tasks.InputFiles
import org.gradle.api.tasks.OutputFile

/**
 * Runs NamingConventionsCheck on a classpath/directory combo to verify that
 * tests are named according to our conventions so they'll be picked up by
 * gradle. Read the Javadoc for NamingConventionsCheck to learn more.
 */
public class NamingConventionsTask extends LoggedExec {
    /**
     * We use a simple "marker" file that we touch when the task succeeds
     * as the task output. This is compared against the modified time of the
     * inputs (ie the jars/class files).
     */
    @OutputFile
    File successMarker = new File(project.buildDir, 'markers/namingConventions')

    /**
     * The classpath to run the naming conventions checks against. Must contain the files in the test
     * output directory and everything required to load those classes.
     *
     * We don't declare the actual test files as a dependency or input because if they change then
     * this will change.
     */
    @InputFiles
    FileCollection classpath = project.sourceSets.test.runtimeClasspath

    /**
     * Should we skip the integ tests in disguise tests? Defaults to false because only core names its
     * integ tests correctly.
     */
    @Input
    boolean skipIntegTestInDisguise = false

    public NamingConventionsTask() {
        dependsOn(classpath)
        description = "Runs NamingConventionsCheck on ${classpath}"
        executable = new File(project.javaHome, 'bin/java')
        onlyIf { project.sourceSets.test.output.classesDir.exists() }
        /*
         * We build the arguments in a funny afterEvaluate/doFirst closure so that we can wait for the classpath to be
         * ready for us. Strangely neither one on their own are good enough.
         */
        project.afterEvaluate {
            doFirst {
                args('-cp', classpath.asPath, 'org.elasticsearch.test.NamingConventionsCheck')
                if (skipIntegTestInDisguise) {
                    args('--skip-integ-tests-in-disguise')
                }
                /*
                 * The test framework has classes that fail the checks to validate that the checks fail properly.
                 * Since these would cause the build to fail we have to ignore them with this parameter. The
                 * process of ignoring them lets us validate that they were found so this ignore parameter acts
                 * as the test for the NamingConventionsCheck.
                 */
                if (':test:framework'.equals(project.path)) {
                    args('--self-test')
                }
                args('--', project.sourceSets.test.output.classesDir.absolutePath)
            }
        }
        doLast { successMarker.setText("", 'UTF-8') }
    }
}

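(For reference: with the arguments assembled above, the task effectively shells out to something like java -cp <test runtime classpath> org.elasticsearch.test.NamingConventionsCheck --skip-integ-tests-in-disguise --self-test -- <test classes dir>, where the two flags appear only when skipIntegTestInDisguise is set and when the project is :test:framework, respectively. This command line is inferred from the task's args() calls, not spelled out in the diff.)
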
@@ -33,6 +33,7 @@ class PrecommitTasks {
        List<Task> precommitTasks = [
            configureForbiddenApis(project),
            configureCheckstyle(project),
            configureNamingConventions(project),
            project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class),
            project.tasks.create('licenseHeaders', LicenseHeadersTask.class),
            project.tasks.create('jarHell', JarHellTask.class),

@@ -109,4 +110,11 @@ class PrecommitTasks {
        }
        return checkstyleTask
    }

    private static Task configureNamingConventions(Project project) {
        if (project.sourceSets.findByName("test")) {
            return project.tasks.create('namingConventions', NamingConventionsTask)
        }
        return null
    }
}

@@ -207,16 +207,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]ShardSearchFailure.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]TransportClearScrollAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]TransportMultiSearchAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]TransportSearchAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]TransportSearchScrollAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]type[/\\]TransportSearchDfsQueryAndFetchAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]type[/\\]TransportSearchDfsQueryThenFetchAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]type[/\\]TransportSearchHelper.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]type[/\\]TransportSearchQueryAndFetchAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]type[/\\]TransportSearchQueryThenFetchAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]type[/\\]TransportSearchScrollQueryAndFetchAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]type[/\\]TransportSearchScrollQueryThenFetchAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]type[/\\]TransportSearchTypeAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]suggest[/\\]SuggestResponse.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]suggest[/\\]TransportSuggestAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]ActionFilter.java" checks="LineLength" />

@@ -756,7 +746,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]MultiValueMode.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]SearchModule.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]SearchService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]action[/\\]SearchServiceTransportAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]AggregatorFactories.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]AggregatorFactory.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]InternalAggregation.java" checks="LineLength" />

@@ -1497,7 +1486,6 @@
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]groovy[/\\]GroovyScriptEngineService.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]BucketScriptTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]BulkTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]DateRangeTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]DoubleTermsTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]EquivalenceTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]FunctionScoreTests.java" checks="LineLength" />

@@ -1535,7 +1523,6 @@
<suppress files="plugins[/\\]analysis-icu[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]IcuNormalizerTokenFilterFactory.java" checks="LineLength" />
<suppress files="plugins[/\\]analysis-icu[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]IndexableBinaryStringTools.java" checks="LineLength" />
<suppress files="plugins[/\\]analysis-icu[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]AnalysisTestUtils.java" checks="LineLength" />
<suppress files="plugins[/\\]analysis-icu[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]TestIndexableBinaryStringTools.java" checks="LineLength" />
<suppress files="plugins[/\\]analysis-kuromoji[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]JapaneseStopTokenFilterFactory.java" checks="LineLength" />
<suppress files="plugins[/\\]analysis-kuromoji[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]KuromojiAnalysisTests.java" checks="LineLength" />
<suppress files="plugins[/\\]analysis-phonetic[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]PhoneticTokenFilterFactory.java" checks="LineLength" />

@@ -1616,7 +1603,6 @@
<suppress files="qa[/\\]evil-tests[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]cli[/\\]CheckFileCommandTests.java" checks="LineLength" />
<suppress files="qa[/\\]evil-tests[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]tribe[/\\]TribeUnitTests.java" checks="LineLength" />
<suppress files="qa[/\\]smoke-test-client[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]smoketest[/\\]ESSmokeClientTestCase.java" checks="LineLength" />
<suppress files="qa[/\\]smoke-test-ingest-with-all-dependencies[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]AbstractMustacheTests.java" checks="LineLength" />
<suppress files="qa[/\\]smoke-test-ingest-with-all-dependencies[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]CombineProcessorsTests.java" checks="LineLength" />
<suppress files="qa[/\\]smoke-test-ingest-with-all-dependencies[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]IngestDocumentMustacheIT.java" checks="LineLength" />
<suppress files="qa[/\\]smoke-test-ingest-with-all-dependencies[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]IngestMustacheSetProcessorIT.java" checks="LineLength" />

@@ -33,20 +33,6 @@ java.util.Formatter#<init>(java.lang.String,java.lang.String,java.util.Locale)
java.io.RandomAccessFile
java.nio.file.Path#toFile()

@defaultMessage Don't use deprecated lucene apis
org.apache.lucene.index.DocsEnum
org.apache.lucene.index.DocsAndPositionsEnum
org.apache.lucene.queries.TermFilter
org.apache.lucene.queries.TermsFilter
org.apache.lucene.search.Filter
org.apache.lucene.search.FilteredQuery
org.apache.lucene.search.TermRangeFilter
org.apache.lucene.search.NumericRangeFilter
org.apache.lucene.search.PrefixFilter
org.apache.lucene.search.QueryWrapperFilter
org.apache.lucene.search.join.BitDocIdSetCachingWrapperFilter
org.apache.lucene.index.IndexWriter#isLocked(org.apache.lucene.store.Directory)

java.nio.file.Paths @ Use org.elasticsearch.common.io.PathUtils.get() instead.
java.nio.file.FileSystems#getDefault() @ use org.elasticsearch.common.io.PathUtils.getDefaultFileSystem() instead.

@@ -41,14 +41,10 @@ org.apache.lucene.index.IndexReader#addReaderClosedListener(org.apache.lucene.in
org.apache.lucene.index.IndexReader#removeReaderClosedListener(org.apache.lucene.index.IndexReader$ReaderClosedListener)

@defaultMessage Pass the precision step from the mappings explicitly instead
org.apache.lucene.search.NumericRangeQuery#newDoubleRange(java.lang.String,java.lang.Double,java.lang.Double,boolean,boolean)
org.apache.lucene.search.NumericRangeQuery#newFloatRange(java.lang.String,java.lang.Float,java.lang.Float,boolean,boolean)
org.apache.lucene.search.NumericRangeQuery#newIntRange(java.lang.String,java.lang.Integer,java.lang.Integer,boolean,boolean)
org.apache.lucene.search.NumericRangeQuery#newLongRange(java.lang.String,java.lang.Long,java.lang.Long,boolean,boolean)
org.apache.lucene.search.NumericRangeFilter#newDoubleRange(java.lang.String,java.lang.Double,java.lang.Double,boolean,boolean)
org.apache.lucene.search.NumericRangeFilter#newFloatRange(java.lang.String,java.lang.Float,java.lang.Float,boolean,boolean)
org.apache.lucene.search.NumericRangeFilter#newIntRange(java.lang.String,java.lang.Integer,java.lang.Integer,boolean,boolean)
org.apache.lucene.search.NumericRangeFilter#newLongRange(java.lang.String,java.lang.Long,java.lang.Long,boolean,boolean)
org.apache.lucene.search.LegacyNumericRangeQuery#newDoubleRange(java.lang.String,java.lang.Double,java.lang.Double,boolean,boolean)
org.apache.lucene.search.LegacyNumericRangeQuery#newFloatRange(java.lang.String,java.lang.Float,java.lang.Float,boolean,boolean)
org.apache.lucene.search.LegacyNumericRangeQuery#newIntRange(java.lang.String,java.lang.Integer,java.lang.Integer,boolean,boolean)
org.apache.lucene.search.LegacyNumericRangeQuery#newLongRange(java.lang.String,java.lang.Long,java.lang.Long,boolean,boolean)

@defaultMessage Only use wait / notify when really needed try to use concurrency primitives, latches or callbacks instead.
java.lang.Object#wait()

@@ -88,9 +84,6 @@ java.util.concurrent.Future#cancel(boolean)
org.elasticsearch.common.io.PathUtils#get(java.lang.String, java.lang.String[])
org.elasticsearch.common.io.PathUtils#get(java.net.URI)

@defaultMessage Don't use deprecated Query#setBoost, wrap the query into a BoostQuery instead
org.apache.lucene.search.Query#setBoost(float)

@defaultMessage Constructing a DateTime without a time zone is dangerous
org.joda.time.DateTime#<init>()
org.joda.time.DateTime#<init>(long)

@@ -1,8 +1,8 @@
elasticsearch = 3.0.0
lucene = 5.5.0
elasticsearch = 5.0.0
lucene = 6.0.0-snapshot-bea235f

# optional dependencies
spatial4j = 0.5
spatial4j = 0.6
jts = 1.13
jackson = 2.7.1
log4j = 1.2.17

@@ -42,6 +42,7 @@ dependencies {
compile "org.apache.lucene:lucene-queryparser:${versions.lucene}"
compile "org.apache.lucene:lucene-sandbox:${versions.lucene}"
compile "org.apache.lucene:lucene-spatial:${versions.lucene}"
compile "org.apache.lucene:lucene-spatial-extras:${versions.lucene}"
compile "org.apache.lucene:lucene-spatial3d:${versions.lucene}"
compile "org.apache.lucene:lucene-suggest:${versions.lucene}"

@@ -71,7 +72,7 @@ dependencies {
compile 'org.hdrhistogram:HdrHistogram:2.1.6'

// lucene spatial
compile "com.spatial4j:spatial4j:${versions.spatial4j}", optional
compile "org.locationtech.spatial4j:spatial4j:${versions.spatial4j}", optional
compile "com.vividsolutions:jts:${versions.jts}", optional

// logging

@@ -168,11 +169,6 @@ thirdPartyAudit.excludes = [
'org.apache.commons.logging.Log',
'org.apache.commons.logging.LogFactory',

// from org.apache.lucene.sandbox.queries.regex.JakartaRegexpCapabilities$JakartaRegexMatcher (lucene-sandbox)
'org.apache.regexp.CharacterIterator',
'org.apache.regexp.RE',
'org.apache.regexp.REProgram',

// from org.jboss.netty.handler.ssl.OpenSslEngine (netty)
'org.apache.tomcat.jni.Buffer',
'org.apache.tomcat.jni.Library',

@@ -210,7 +206,7 @@ thirdPartyAudit.excludes = [
'org.jboss.marshalling.MarshallingConfiguration',
'org.jboss.marshalling.Unmarshaller',

// from com.spatial4j.core.io.GeoJSONReader (spatial4j)
// from org.locationtech.spatial4j.io.GeoJSONReader (spatial4j)
'org.noggit.JSONParser',

// from org.jboss.netty.container.osgi.NettyBundleActivator (netty)

@@ -33,7 +33,6 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.InPlaceMergeSorter;
import org.apache.lucene.util.ToStringUtils;

import java.io.IOException;
import java.util.ArrayList;

@@ -247,14 +246,15 @@ public abstract class BlendedTermQuery extends Query {
        if (boosts != null) {
            boost = boosts[i];
        }
        builder.append(ToStringUtils.boost(boost));
        if (boost != 1f) {
            builder.append('^').append(boost);
        }
        builder.append(", ");
    }
    if (terms.length > 0) {
        builder.setLength(builder.length() - 2);
    }
    builder.append("])");
    builder.append(ToStringUtils.boost(getBoost()));
    return builder.toString();
}

@@ -24,6 +24,7 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.DisjunctionMaxQuery;
import org.apache.lucene.search.FuzzyQuery;

@@ -165,7 +166,7 @@ public class MapperQueryParser extends QueryParser {
        }
        if (clauses.size() == 0) // happens for stopwords
            return null;
        return getBooleanQuery(clauses, true);
        return getBooleanQueryCoordDisabled(clauses);
    }
} else {
    return getFieldQuerySingle(field, queryText, quoted);

@@ -267,7 +268,7 @@ public class MapperQueryParser extends QueryParser {
        }
        if (clauses.size() == 0) // happens for stopwords
            return null;
        return getBooleanQuery(clauses, true);
        return getBooleanQueryCoordDisabled(clauses);
    }
} else {
    return super.getFieldQuery(field, queryText, slop);

@@ -318,7 +319,7 @@ public class MapperQueryParser extends QueryParser {
        }
        if (clauses.size() == 0) // happens for stopwords
            return null;
        return getBooleanQuery(clauses, true);
        return getBooleanQueryCoordDisabled(clauses);
    }
}

@@ -380,7 +381,7 @@ public class MapperQueryParser extends QueryParser {
                clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD));
            }
        }
        return getBooleanQuery(clauses, true);
        return getBooleanQueryCoordDisabled(clauses);
    }
} else {
    return getFuzzyQuerySingle(field, termStr, minSimilarity);

@@ -445,7 +446,7 @@ public class MapperQueryParser extends QueryParser {
        }
        if (clauses.size() == 0) // happens for stopwords
            return null;
        return getBooleanQuery(clauses, true);
        return getBooleanQueryCoordDisabled(clauses);
    }
} else {
    return getPrefixQuerySingle(field, termStr);

@@ -520,7 +521,7 @@ public class MapperQueryParser extends QueryParser {
        for (String token : tlist) {
            clauses.add(new BooleanClause(super.getPrefixQuery(field, token), BooleanClause.Occur.SHOULD));
        }
        return getBooleanQuery(clauses, true);
        return getBooleanQueryCoordDisabled(clauses);
    }
}

@@ -575,7 +576,7 @@ public class MapperQueryParser extends QueryParser {
        }
        if (clauses.size() == 0) // happens for stopwords
            return null;
        return getBooleanQuery(clauses, true);
        return getBooleanQueryCoordDisabled(clauses);
    }
} else {
    return getWildcardQuerySingle(field, termStr);

@@ -704,7 +705,7 @@ public class MapperQueryParser extends QueryParser {
        }
        if (clauses.size() == 0) // happens for stopwords
            return null;
        return getBooleanQuery(clauses, true);
        return getBooleanQueryCoordDisabled(clauses);
    }
} else {
    return getRegexpQuerySingle(field, termStr);

@@ -739,10 +740,24 @@ public class MapperQueryParser extends QueryParser {
        setAnalyzer(oldAnalyzer);
    }
}

/**
 * @deprecated review all use of this, don't rely on coord
 */
@Deprecated
protected Query getBooleanQueryCoordDisabled(List<BooleanClause> clauses) throws ParseException {
    BooleanQuery.Builder builder = new BooleanQuery.Builder();
    builder.setDisableCoord(true);
    for (BooleanClause clause : clauses) {
        builder.add(clause);
    }
    return fixNegativeQueryIfNeeded(builder.build());
}


@Override
protected Query getBooleanQuery(List<BooleanClause> clauses, boolean disableCoord) throws ParseException {
    Query q = super.getBooleanQuery(clauses, disableCoord);
protected Query getBooleanQuery(List<BooleanClause> clauses) throws ParseException {
    Query q = super.getBooleanQuery(clauses);
    if (q == null) {
        return null;
    }
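
(Note on the change above: the two-argument getBooleanQuery(List, boolean) override is replaced by the one-argument getBooleanQuery(List), so the old getBooleanQuery(clauses, true) call sites move to the new getBooleanQueryCoordDisabled helper, which reproduces the disableCoord=true behavior via BooleanQuery.Builder#setDisableCoord. The helper is @Deprecated on purpose: per its javadoc, each caller should be reviewed for whether it actually relies on coord.)
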
@@ -769,7 +784,6 @@ public class MapperQueryParser extends QueryParser {
        }
        pq = builder.build();
        //make sure that the boost hasn't been set beforehand, otherwise we'd lose it
        assert q.getBoost() == 1f;
        assert q instanceof BoostQuery == false;
        return pq;
    } else if (q instanceof MultiPhraseQuery) {

@@ -26,8 +26,7 @@ import java.io.IOException;
/**
 * Abstract decorator class of a DocIdSetIterator
 * implementation that provides on-demand filter/validation
 * mechanism on an underlying DocIdSetIterator. See {@link
 * FilteredDocIdSet}.
 * mechanism on an underlying DocIdSetIterator.
 */
public abstract class XFilteredDocIdSetIterator extends DocIdSetIterator {
    protected DocIdSetIterator _innerIter;

@@ -87,7 +87,7 @@ public class CustomFieldQuery extends FieldQuery {
    if (numTerms > 16) {
        for (Term[] currentPosTerm : terms) {
            for (Term term : currentPosTerm) {
                super.flatten(new TermQuery(term), reader, flatQueries, orig.getBoost());
                super.flatten(new TermQuery(term), reader, flatQueries, 1F);
            }
        }
        return;

@@ -104,7 +104,7 @@ public class CustomFieldQuery extends FieldQuery {
        queryBuilder.add(terms.get(i)[termsIdx[i]], pos[i]);
    }
    Query query = queryBuilder.build();
    this.flatten(query, reader, flatQueries, orig.getBoost());
    this.flatten(query, reader, flatQueries, 1F);
} else {
    Term[] t = terms.get(currentPos);
    for (int i = 0; i < t.length; i++) {

@@ -35,212 +35,10 @@ import java.io.IOException;
@SuppressWarnings("deprecation")
public class Version {

    // The logic for ID is: XXYYZZAA, where XX is major version, YY is minor version, ZZ is revision, and AA is Beta/RC indicator
    // AA values below 50 are beta builds, and below 99 are RC builds, with 99 indicating a release
    // The logic for ID is: XXYYZZAA, where XX is major version, YY is minor version, ZZ is revision, and AA is alpha/beta/rc indicator
    // AA values below 25 are for alpha builds (since 5.0), and above 25 and below 50 are beta builds, and below 99 are RC builds, with 99 indicating a release
    // the (internal) format of the id is there so we can easily do after/before checks on the id

    // NOTE: indexes created with 3.6 use this constant for e.g. analysis chain emulation (imperfect)
    public static final org.apache.lucene.util.Version LUCENE_3_EMULATION_VERSION = org.apache.lucene.util.Version.LUCENE_4_0_0;

    public static final int V_0_18_0_ID = /*00*/180099;
    public static final Version V_0_18_0 = new Version(V_0_18_0_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_18_1_ID = /*00*/180199;
    public static final Version V_0_18_1 = new Version(V_0_18_1_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_18_2_ID = /*00*/180299;
    public static final Version V_0_18_2 = new Version(V_0_18_2_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_18_3_ID = /*00*/180399;
    public static final Version V_0_18_3 = new Version(V_0_18_3_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_18_4_ID = /*00*/180499;
    public static final Version V_0_18_4 = new Version(V_0_18_4_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_18_5_ID = /*00*/180599;
    public static final Version V_0_18_5 = new Version(V_0_18_5_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_18_6_ID = /*00*/180699;
    public static final Version V_0_18_6 = new Version(V_0_18_6_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_18_7_ID = /*00*/180799;
    public static final Version V_0_18_7 = new Version(V_0_18_7_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_18_8_ID = /*00*/180899;
    public static final Version V_0_18_8 = new Version(V_0_18_8_ID, LUCENE_3_EMULATION_VERSION);

    public static final int V_0_19_0_RC1_ID = /*00*/190051;
    public static final Version V_0_19_0_RC1 = new Version(V_0_19_0_RC1_ID, LUCENE_3_EMULATION_VERSION);

    public static final int V_0_19_0_RC2_ID = /*00*/190052;
    public static final Version V_0_19_0_RC2 = new Version(V_0_19_0_RC2_ID, LUCENE_3_EMULATION_VERSION);

    public static final int V_0_19_0_RC3_ID = /*00*/190053;
    public static final Version V_0_19_0_RC3 = new Version(V_0_19_0_RC3_ID, LUCENE_3_EMULATION_VERSION);

    public static final int V_0_19_0_ID = /*00*/190099;
    public static final Version V_0_19_0 = new Version(V_0_19_0_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_1_ID = /*00*/190199;
    public static final Version V_0_19_1 = new Version(V_0_19_1_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_2_ID = /*00*/190299;
    public static final Version V_0_19_2 = new Version(V_0_19_2_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_3_ID = /*00*/190399;
    public static final Version V_0_19_3 = new Version(V_0_19_3_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_4_ID = /*00*/190499;
    public static final Version V_0_19_4 = new Version(V_0_19_4_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_5_ID = /*00*/190599;
    public static final Version V_0_19_5 = new Version(V_0_19_5_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_6_ID = /*00*/190699;
    public static final Version V_0_19_6 = new Version(V_0_19_6_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_7_ID = /*00*/190799;
    public static final Version V_0_19_7 = new Version(V_0_19_7_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_8_ID = /*00*/190899;
    public static final Version V_0_19_8 = new Version(V_0_19_8_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_9_ID = /*00*/190999;
    public static final Version V_0_19_9 = new Version(V_0_19_9_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_10_ID = /*00*/191099;
    public static final Version V_0_19_10 = new Version(V_0_19_10_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_11_ID = /*00*/191199;
    public static final Version V_0_19_11 = new Version(V_0_19_11_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_12_ID = /*00*/191299;
    public static final Version V_0_19_12 = new Version(V_0_19_12_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_13_ID = /*00*/191399;
    public static final Version V_0_19_13 = new Version(V_0_19_13_ID, LUCENE_3_EMULATION_VERSION);

    public static final int V_0_20_0_RC1_ID = /*00*/200051;
    public static final Version V_0_20_0_RC1 = new Version(V_0_20_0_RC1_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_20_0_ID = /*00*/200099;
    public static final Version V_0_20_0 = new Version(V_0_20_0_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_20_1_ID = /*00*/200199;
    public static final Version V_0_20_1 = new Version(V_0_20_1_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_20_2_ID = /*00*/200299;
    public static final Version V_0_20_2 = new Version(V_0_20_2_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_20_3_ID = /*00*/200399;
    public static final Version V_0_20_3 = new Version(V_0_20_3_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_20_4_ID = /*00*/200499;
    public static final Version V_0_20_4 = new Version(V_0_20_4_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_20_5_ID = /*00*/200599;
    public static final Version V_0_20_5 = new Version(V_0_20_5_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_20_6_ID = /*00*/200699;
    public static final Version V_0_20_6 = new Version(V_0_20_6_ID, LUCENE_3_EMULATION_VERSION);

    public static final int V_0_90_0_Beta1_ID = /*00*/900001;
    public static final Version V_0_90_0_Beta1 = new Version(V_0_90_0_Beta1_ID, org.apache.lucene.util.Version.LUCENE_4_1);
    public static final int V_0_90_0_RC1_ID = /*00*/900051;
    public static final Version V_0_90_0_RC1 = new Version(V_0_90_0_RC1_ID, org.apache.lucene.util.Version.LUCENE_4_1);
    public static final int V_0_90_0_RC2_ID = /*00*/900052;
    public static final Version V_0_90_0_RC2 = new Version(V_0_90_0_RC2_ID, org.apache.lucene.util.Version.LUCENE_4_2);
    public static final int V_0_90_0_ID = /*00*/900099;
    public static final Version V_0_90_0 = new Version(V_0_90_0_ID, org.apache.lucene.util.Version.LUCENE_4_2);
    public static final int V_0_90_1_ID = /*00*/900199;
    public static final Version V_0_90_1 = new Version(V_0_90_1_ID, org.apache.lucene.util.Version.LUCENE_4_3);
    public static final int V_0_90_2_ID = /*00*/900299;
    public static final Version V_0_90_2 = new Version(V_0_90_2_ID, org.apache.lucene.util.Version.LUCENE_4_3);
    public static final int V_0_90_3_ID = /*00*/900399;
    public static final Version V_0_90_3 = new Version(V_0_90_3_ID, org.apache.lucene.util.Version.LUCENE_4_4);
    public static final int V_0_90_4_ID = /*00*/900499;
    public static final Version V_0_90_4 = new Version(V_0_90_4_ID, org.apache.lucene.util.Version.LUCENE_4_4);
    public static final int V_0_90_5_ID = /*00*/900599;
    public static final Version V_0_90_5 = new Version(V_0_90_5_ID, org.apache.lucene.util.Version.LUCENE_4_4);
    public static final int V_0_90_6_ID = /*00*/900699;
    public static final Version V_0_90_6 = new Version(V_0_90_6_ID, org.apache.lucene.util.Version.LUCENE_4_5);
    public static final int V_0_90_7_ID = /*00*/900799;
    public static final Version V_0_90_7 = new Version(V_0_90_7_ID, org.apache.lucene.util.Version.LUCENE_4_5);
    public static final int V_0_90_8_ID = /*00*/900899;
    public static final Version V_0_90_8 = new Version(V_0_90_8_ID, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_0_90_9_ID = /*00*/900999;
    public static final Version V_0_90_9 = new Version(V_0_90_9_ID, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_0_90_10_ID = /*00*/901099;
    public static final Version V_0_90_10 = new Version(V_0_90_10_ID, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_0_90_11_ID = /*00*/901199;
    public static final Version V_0_90_11 = new Version(V_0_90_11_ID, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_0_90_12_ID = /*00*/901299;
    public static final Version V_0_90_12 = new Version(V_0_90_12_ID, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_0_90_13_ID = /*00*/901399;
    public static final Version V_0_90_13 = new Version(V_0_90_13_ID, org.apache.lucene.util.Version.LUCENE_4_6);

    public static final int V_1_0_0_Beta1_ID = 1000001;
    public static final Version V_1_0_0_Beta1 = new Version(V_1_0_0_Beta1_ID, org.apache.lucene.util.Version.LUCENE_4_5);
    public static final int V_1_0_0_Beta2_ID = 1000002;
    public static final Version V_1_0_0_Beta2 = new Version(V_1_0_0_Beta2_ID, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_1_0_0_RC1_ID = 1000051;
    public static final Version V_1_0_0_RC1 = new Version(V_1_0_0_RC1_ID, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_1_0_0_RC2_ID = 1000052;
    public static final Version V_1_0_0_RC2 = new Version(V_1_0_0_RC2_ID, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_1_0_0_ID = 1000099;
    public static final Version V_1_0_0 = new Version(V_1_0_0_ID, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_1_0_1_ID = 1000199;
    public static final Version V_1_0_1 = new Version(V_1_0_1_ID, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_1_0_2_ID = 1000299;
    public static final Version V_1_0_2 = new Version(V_1_0_2_ID, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_1_0_3_ID = 1000399;
    public static final Version V_1_0_3 = new Version(V_1_0_3_ID, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_1_1_0_ID = 1010099;
    public static final Version V_1_1_0 = new Version(V_1_1_0_ID, org.apache.lucene.util.Version.LUCENE_4_7);
    public static final int V_1_1_1_ID = 1010199;
    public static final Version V_1_1_1 = new Version(V_1_1_1_ID, org.apache.lucene.util.Version.LUCENE_4_7);
    public static final int V_1_1_2_ID = 1010299;
    public static final Version V_1_1_2 = new Version(V_1_1_2_ID, org.apache.lucene.util.Version.LUCENE_4_7);
    public static final int V_1_2_0_ID = 1020099;
    public static final Version V_1_2_0 = new Version(V_1_2_0_ID, org.apache.lucene.util.Version.LUCENE_4_8);
    public static final int V_1_2_1_ID = 1020199;
    public static final Version V_1_2_1 = new Version(V_1_2_1_ID, org.apache.lucene.util.Version.LUCENE_4_8);
    public static final int V_1_2_2_ID = 1020299;
    public static final Version V_1_2_2 = new Version(V_1_2_2_ID, org.apache.lucene.util.Version.LUCENE_4_8);
    public static final int V_1_2_3_ID = 1020399;
    public static final Version V_1_2_3 = new Version(V_1_2_3_ID, org.apache.lucene.util.Version.LUCENE_4_8);
    public static final int V_1_2_4_ID = 1020499;
    public static final Version V_1_2_4 = new Version(V_1_2_4_ID, org.apache.lucene.util.Version.LUCENE_4_8);
    public static final int V_1_3_0_ID = 1030099;
    public static final Version V_1_3_0 = new Version(V_1_3_0_ID, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_1_ID = 1030199;
    public static final Version V_1_3_1 = new Version(V_1_3_1_ID, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_2_ID = 1030299;
    public static final Version V_1_3_2 = new Version(V_1_3_2_ID, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_3_ID = 1030399;
    public static final Version V_1_3_3 = new Version(V_1_3_3_ID, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_4_ID = 1030499;
    public static final Version V_1_3_4 = new Version(V_1_3_4_ID, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_5_ID = 1030599;
    public static final Version V_1_3_5 = new Version(V_1_3_5_ID, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_6_ID = 1030699;
    public static final Version V_1_3_6 = new Version(V_1_3_6_ID, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_7_ID = 1030799;
    public static final Version V_1_3_7 = new Version(V_1_3_7_ID, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_8_ID = 1030899;
    public static final Version V_1_3_8 = new Version(V_1_3_8_ID, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_9_ID = 1030999;
    public static final Version V_1_3_9 = new Version(V_1_3_9_ID, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_4_0_Beta1_ID = 1040001;
    public static final Version V_1_4_0_Beta1 = new Version(V_1_4_0_Beta1_ID, org.apache.lucene.util.Version.LUCENE_4_10_1);
    public static final int V_1_4_0_ID = 1040099;
    public static final Version V_1_4_0 = new Version(V_1_4_0_ID, org.apache.lucene.util.Version.LUCENE_4_10_2);
    public static final int V_1_4_1_ID = 1040199;
    public static final Version V_1_4_1 = new Version(V_1_4_1_ID, org.apache.lucene.util.Version.LUCENE_4_10_2);
    public static final int V_1_4_2_ID = 1040299;
    public static final Version V_1_4_2 = new Version(V_1_4_2_ID, org.apache.lucene.util.Version.LUCENE_4_10_2);
    public static final int V_1_4_3_ID = 1040399;
    public static final Version V_1_4_3 = new Version(V_1_4_3_ID, org.apache.lucene.util.Version.LUCENE_4_10_3);
    public static final int V_1_4_4_ID = 1040499;
    public static final Version V_1_4_4 = new Version(V_1_4_4_ID, org.apache.lucene.util.Version.LUCENE_4_10_3);
    public static final int V_1_4_5_ID = 1040599;
    public static final Version V_1_4_5 = new Version(V_1_4_5_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_5_0_ID = 1050099;
    public static final Version V_1_5_0 = new Version(V_1_5_0_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_5_1_ID = 1050199;
    public static final Version V_1_5_1 = new Version(V_1_5_1_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_5_2_ID = 1050299;
    public static final Version V_1_5_2 = new Version(V_1_5_2_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_6_0_ID = 1060099;
    public static final Version V_1_6_0 = new Version(V_1_6_0_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_6_1_ID = 1060199;
    public static final Version V_1_6_1 = new Version(V_1_6_1_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_6_2_ID = 1060299;
    public static final Version V_1_6_2 = new Version(V_1_6_2_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_7_0_ID = 1070099;
    public static final Version V_1_7_0 = new Version(V_1_7_0_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_7_1_ID = 1070199;
    public static final Version V_1_7_1 = new Version(V_1_7_1_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_7_2_ID = 1070299;
    public static final Version V_1_7_2 = new Version(V_1_7_2_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_7_3_ID = 1070399;
    public static final Version V_1_7_3 = new Version(V_1_7_3_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_7_4_ID = 1070499;
    public static final Version V_1_7_4 = new Version(V_1_7_4_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_7_5_ID = 1070599;
    public static final Version V_1_7_5 = new Version(V_1_7_5_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);

    public static final int V_2_0_0_beta1_ID = 2000001;
    public static final Version V_2_0_0_beta1 = new Version(V_2_0_0_beta1_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
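
(For reference, a small standalone sketch of the XXYYZZAA encoding described in the comment above; the class and variable names here are illustrative, not part of the diff:

    public class VersionIdExample {
        public static void main(String[] args) {
            // XXYYZZAA: XX = major, YY = minor, ZZ = revision, AA = alpha/beta/rc indicator.
            // Under the 5.0+ rules in this diff: alpha builds use AA 1..24, beta builds
            // use 25 + N (AA 26..49), RC builds use 50 + N (AA 51..98), releases use 99.
            int major = 5, minor = 0, revision = 0;
            int release = 99;   // "5.0.0"
            int alpha1 = 1;     // "5.0.0-alpha1"
            int beta1 = 25 + 1; // "5.0.0-beta1"
            int rc1 = 50 + 1;   // "5.0.0-rc1"
            int id = major * 1000000 + minor * 10000 + revision * 100 + release;
            System.out.println(id); // prints 5000099, matching V_5_0_0_ID below
        }
    }

)
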
@@ -264,9 +62,9 @@ public class Version {
    public static final Version V_2_2_0 = new Version(V_2_2_0_ID, org.apache.lucene.util.Version.LUCENE_5_4_1);
    public static final int V_2_3_0_ID = 2030099;
    public static final Version V_2_3_0 = new Version(V_2_3_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
    public static final int V_3_0_0_ID = 3000099;
    public static final Version V_3_0_0 = new Version(V_3_0_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
    public static final Version CURRENT = V_3_0_0;
    public static final int V_5_0_0_ID = 5000099;
    public static final Version V_5_0_0 = new Version(V_5_0_0_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
    public static final Version CURRENT = V_5_0_0;

    static {
        assert CURRENT.luceneVersion.equals(org.apache.lucene.util.Version.LATEST) : "Version must be upgraded to ["

@@ -279,8 +77,8 @@ public class Version {

    public static Version fromId(int id) {
        switch (id) {
            case V_3_0_0_ID:
                return V_3_0_0;
            case V_5_0_0_ID:
                return V_5_0_0;
            case V_2_3_0_ID:
                return V_2_3_0;
            case V_2_2_0_ID:

@@ -303,198 +101,6 @@ public class Version {
                return V_2_0_0_beta2;
            case V_2_0_0_beta1_ID:
                return V_2_0_0_beta1;
            case V_1_7_5_ID:
                return V_1_7_5;
            case V_1_7_4_ID:
                return V_1_7_4;
            case V_1_7_3_ID:
                return V_1_7_3;
            case V_1_7_2_ID:
                return V_1_7_2;
            case V_1_7_1_ID:
                return V_1_7_1;
            case V_1_7_0_ID:
                return V_1_7_0;
            case V_1_6_2_ID:
                return V_1_6_2;
            case V_1_6_1_ID:
                return V_1_6_1;
            case V_1_6_0_ID:
                return V_1_6_0;
            case V_1_5_2_ID:
                return V_1_5_2;
            case V_1_5_1_ID:
                return V_1_5_1;
            case V_1_5_0_ID:
                return V_1_5_0;
            case V_1_4_5_ID:
                return V_1_4_5;
            case V_1_4_4_ID:
                return V_1_4_4;
            case V_1_4_3_ID:
                return V_1_4_3;
            case V_1_4_2_ID:
                return V_1_4_2;
            case V_1_4_1_ID:
                return V_1_4_1;
            case V_1_4_0_ID:
                return V_1_4_0;
            case V_1_4_0_Beta1_ID:
                return V_1_4_0_Beta1;
            case V_1_3_9_ID:
                return V_1_3_9;
            case V_1_3_8_ID:
                return V_1_3_8;
            case V_1_3_7_ID:
                return V_1_3_7;
            case V_1_3_6_ID:
                return V_1_3_6;
            case V_1_3_5_ID:
                return V_1_3_5;
            case V_1_3_4_ID:
                return V_1_3_4;
            case V_1_3_3_ID:
                return V_1_3_3;
            case V_1_3_2_ID:
                return V_1_3_2;
            case V_1_3_1_ID:
                return V_1_3_1;
            case V_1_3_0_ID:
                return V_1_3_0;
            case V_1_2_4_ID:
                return V_1_2_4;
            case V_1_2_3_ID:
                return V_1_2_3;
            case V_1_2_2_ID:
                return V_1_2_2;
            case V_1_2_1_ID:
                return V_1_2_1;
            case V_1_2_0_ID:
                return V_1_2_0;
            case V_1_1_2_ID:
                return V_1_1_2;
            case V_1_1_1_ID:
                return V_1_1_1;
            case V_1_1_0_ID:
                return V_1_1_0;
            case V_1_0_3_ID:
                return V_1_0_3;
            case V_1_0_2_ID:
                return V_1_0_2;
            case V_1_0_1_ID:
                return V_1_0_1;
            case V_1_0_0_ID:
                return V_1_0_0;
            case V_1_0_0_RC2_ID:
                return V_1_0_0_RC2;
            case V_1_0_0_RC1_ID:
                return V_1_0_0_RC1;
            case V_1_0_0_Beta2_ID:
                return V_1_0_0_Beta2;
            case V_1_0_0_Beta1_ID:
                return V_1_0_0_Beta1;
            case V_0_90_13_ID:
                return V_0_90_13;
            case V_0_90_12_ID:
                return V_0_90_12;
            case V_0_90_11_ID:
                return V_0_90_11;
            case V_0_90_10_ID:
                return V_0_90_10;
            case V_0_90_9_ID:
                return V_0_90_9;
            case V_0_90_8_ID:
                return V_0_90_8;
            case V_0_90_7_ID:
                return V_0_90_7;
            case V_0_90_6_ID:
                return V_0_90_6;
            case V_0_90_5_ID:
                return V_0_90_5;
            case V_0_90_4_ID:
                return V_0_90_4;
            case V_0_90_3_ID:
                return V_0_90_3;
            case V_0_90_2_ID:
                return V_0_90_2;
            case V_0_90_1_ID:
                return V_0_90_1;
            case V_0_90_0_ID:
                return V_0_90_0;
            case V_0_90_0_RC2_ID:
                return V_0_90_0_RC2;
            case V_0_90_0_RC1_ID:
                return V_0_90_0_RC1;
            case V_0_90_0_Beta1_ID:
                return V_0_90_0_Beta1;
            case V_0_20_6_ID:
                return V_0_20_6;
            case V_0_20_5_ID:
                return V_0_20_5;
            case V_0_20_4_ID:
                return V_0_20_4;
            case V_0_20_3_ID:
                return V_0_20_3;
            case V_0_20_2_ID:
                return V_0_20_2;
            case V_0_20_1_ID:
                return V_0_20_1;
            case V_0_20_0_ID:
                return V_0_20_0;
            case V_0_20_0_RC1_ID:
                return V_0_20_0_RC1;
            case V_0_19_0_RC1_ID:
                return V_0_19_0_RC1;
            case V_0_19_0_RC2_ID:
                return V_0_19_0_RC2;
            case V_0_19_0_RC3_ID:
                return V_0_19_0_RC3;
            case V_0_19_0_ID:
                return V_0_19_0;
            case V_0_19_1_ID:
                return V_0_19_1;
            case V_0_19_2_ID:
                return V_0_19_2;
            case V_0_19_3_ID:
                return V_0_19_3;
            case V_0_19_4_ID:
                return V_0_19_4;
            case V_0_19_5_ID:
                return V_0_19_5;
            case V_0_19_6_ID:
                return V_0_19_6;
            case V_0_19_7_ID:
                return V_0_19_7;
            case V_0_19_8_ID:
                return V_0_19_8;
            case V_0_19_9_ID:
                return V_0_19_9;
            case V_0_19_10_ID:
                return V_0_19_10;
            case V_0_19_11_ID:
                return V_0_19_11;
            case V_0_19_12_ID:
                return V_0_19_12;
            case V_0_19_13_ID:
                return V_0_19_13;
            case V_0_18_0_ID:
                return V_0_18_0;
            case V_0_18_1_ID:
                return V_0_18_1;
            case V_0_18_2_ID:
                return V_0_18_2;
            case V_0_18_3_ID:
                return V_0_18_3;
            case V_0_18_4_ID:
                return V_0_18_4;
            case V_0_18_5_ID:
                return V_0_18_5;
            case V_0_18_6_ID:
                return V_0_18_6;
            case V_0_18_7_ID:
                return V_0_18_7;
            case V_0_18_8_ID:
                return V_0_18_8;
            default:
                return new Version(id, org.apache.lucene.util.Version.LATEST);
        }
@@ -531,15 +137,23 @@ public class Version {
        if (!Strings.hasLength(version)) {
            return Version.CURRENT;
        }
        final boolean snapshot; // this is some BWC for 2.x and before indices
        if (snapshot = version.endsWith("-SNAPSHOT")) {
            version = version.substring(0, version.length() - 9);
        }
        String[] parts = version.split("\\.|\\-");
        if (parts.length < 3 || parts.length > 4) {
            throw new IllegalArgumentException("the version needs to contain major, minor, and revision, and optionally the build: " + version);
        }

        try {

            final int rawMajor = Integer.parseInt(parts[0]);
            if (rawMajor >= 5 && snapshot) { // we don't support snapshot as part of the version here anymore
                throw new IllegalArgumentException("illegal version format - snapshots are only supported until version 2.x");
            }
            final int betaOffset = rawMajor < 5 ? 0 : 25;
            //we reverse the version id calculation based on some assumption as we can't reliably reverse the modulo
            final int major = Integer.parseInt(parts[0]) * 1000000;
            final int major = rawMajor * 1000000;
            final int minor = Integer.parseInt(parts[1]) * 10000;
            final int revision = Integer.parseInt(parts[2]) * 100;

@@ -547,11 +161,17 @@ public class Version {
            int build = 99;
            if (parts.length == 4) {
                String buildStr = parts[3];
                if (buildStr.startsWith("Beta") || buildStr.startsWith("beta")) {
                    build = Integer.parseInt(buildStr.substring(4));
                }
                if (buildStr.startsWith("RC") || buildStr.startsWith("rc")) {
                if (buildStr.startsWith("alpha")) {
                    assert rawMajor >= 5 : "major must be >= 5 but was " + major;
                    build = Integer.parseInt(buildStr.substring(5));
                    assert build < 25 : "expected an alpha build but " + build + " >= 25";
                } else if (buildStr.startsWith("Beta") || buildStr.startsWith("beta")) {
                    build = betaOffset + Integer.parseInt(buildStr.substring(4));
                    assert build < 50 : "expected a beta build but " + build + " >= 50";
                } else if (buildStr.startsWith("RC") || buildStr.startsWith("rc")) {
                    build = Integer.parseInt(buildStr.substring(2)) + 50;
                } else {
                    throw new IllegalArgumentException("unable to parse version " + version);
                }
            }
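
(Worked example of the new build parsing, for cross-checking against toString() below: "5.0.0-alpha1" parses to build = 1, "5.0.0-beta1" to build = 25 + 1 = 26, "5.0.0-rc1" to build = 50 + 1 = 51, and a plain "5.0.0" keeps the default build = 99. toString() prints the beta case back as build - 25 when major >= 5, so parsing and printing stay inverses of each other.)
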
@@ -614,13 +234,16 @@ public class Version {
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append(major).append('.').append(minor).append('.').append(revision);
        if (isBeta()) {
        if (isAlpha()) {
            sb.append("-alpha");
            sb.append(build);
        } else if (isBeta()) {
            if (major >= 2) {
                sb.append("-beta");
            } else {
                sb.append(".Beta");
            }
            sb.append(build);
            sb.append(major < 5 ? build : build-25);
        } else if (build < 99) {
            if (major >= 2) {
                sb.append("-rc");

@@ -656,7 +279,16 @@ public class Version {
    }

    public boolean isBeta() {
        return build < 50;
        return major < 5 ? build < 50 : build >= 25 && build < 50;
    }

    /**
     * Returns true iff this version is an alpha version
     * Note: This has been introduced in elasticsearch version 5. Previous versions will never
     * have an alpha version.
     */
    public boolean isAlpha() {
        return major < 5 ? false : build < 25;
    }

    public boolean isRC() {

@@ -21,18 +21,16 @@ package org.elasticsearch.action;

/**
 * A listener for action responses or failures.
 *
 *
 */
public interface ActionListener<Response> {

    /**
     * A response handler.
     * Handle action response. This response may constitute a failure or a
     * success but it is up to the listener to make that decision.
     */
    void onResponse(Response response);

    /**
     * A failure handler.
     * A failure caused by an exception at some phase of the task.
     */
    void onFailure(Throwable e);
}

@@ -174,12 +174,6 @@ import org.elasticsearch.action.search.TransportClearScrollAction;
import org.elasticsearch.action.search.TransportMultiSearchAction;
import org.elasticsearch.action.search.TransportSearchAction;
import org.elasticsearch.action.search.TransportSearchScrollAction;
import org.elasticsearch.action.search.type.TransportSearchDfsQueryAndFetchAction;
import org.elasticsearch.action.search.type.TransportSearchDfsQueryThenFetchAction;
import org.elasticsearch.action.search.type.TransportSearchQueryAndFetchAction;
import org.elasticsearch.action.search.type.TransportSearchQueryThenFetchAction;
import org.elasticsearch.action.search.type.TransportSearchScrollQueryAndFetchAction;
import org.elasticsearch.action.search.type.TransportSearchScrollQueryThenFetchAction;
import org.elasticsearch.action.suggest.SuggestAction;
import org.elasticsearch.action.suggest.TransportSuggestAction;
import org.elasticsearch.action.support.ActionFilter;

@@ -333,16 +327,8 @@ public class ActionModule extends AbstractModule {
                TransportShardMultiGetAction.class);
        registerAction(BulkAction.INSTANCE, TransportBulkAction.class,
                TransportShardBulkAction.class);
        registerAction(SearchAction.INSTANCE, TransportSearchAction.class,
                TransportSearchDfsQueryThenFetchAction.class,
                TransportSearchQueryThenFetchAction.class,
                TransportSearchDfsQueryAndFetchAction.class,
                TransportSearchQueryAndFetchAction.class
        );
        registerAction(SearchScrollAction.INSTANCE, TransportSearchScrollAction.class,
                TransportSearchScrollQueryThenFetchAction.class,
                TransportSearchScrollQueryAndFetchAction.class
        );
        registerAction(SearchAction.INSTANCE, TransportSearchAction.class);
        registerAction(SearchScrollAction.INSTANCE, TransportSearchScrollAction.class);
        registerAction(MultiSearchAction.INSTANCE, TransportMultiSearchAction.class);
        registerAction(PercolateAction.INSTANCE, TransportPercolateAction.class);
        registerAction(MultiPercolateAction.INSTANCE, TransportMultiPercolateAction.class, TransportShardMultiPercolateAction.class);

@@ -197,9 +197,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo
        numberOfPendingTasks = in.readInt();
        timedOut = in.readBoolean();
        numberOfInFlightFetch = in.readInt();
        if (in.getVersion().onOrAfter(Version.V_1_7_0)) {
            delayedUnassignedShards = in.readInt();
        }
        delayedUnassignedShards = in.readInt();
        taskMaxWaitingTime = TimeValue.readTimeValue(in);
    }

@@ -212,9 +210,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo
        out.writeInt(numberOfPendingTasks);
        out.writeBoolean(timedOut);
        out.writeInt(numberOfInFlightFetch);
        if (out.getVersion().onOrAfter(Version.V_1_7_0)) {
            out.writeInt(delayedUnassignedShards);
        }
        out.writeInt(delayedUnassignedShards);
        taskMaxWaitingTime.writeTo(out);
    }

@@ -28,6 +28,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.http.HttpInfo;
import org.elasticsearch.ingest.core.IngestInfo;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.monitor.os.OsInfo;
import org.elasticsearch.monitor.process.ProcessInfo;

@@ -74,12 +75,15 @@ public class NodeInfo extends BaseNodeResponse {
    @Nullable
    private PluginsAndModules plugins;

    NodeInfo() {
    @Nullable
    private IngestInfo ingest;

    public NodeInfo() {
    }

    public NodeInfo(Version version, Build build, DiscoveryNode node, @Nullable Map<String, String> serviceAttributes, @Nullable Settings settings,
                    @Nullable OsInfo os, @Nullable ProcessInfo process, @Nullable JvmInfo jvm, @Nullable ThreadPoolInfo threadPool,
                    @Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsAndModules plugins) {
                    @Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsAndModules plugins, @Nullable IngestInfo ingest) {
        super(node);
        this.version = version;
        this.build = build;

@@ -92,6 +96,7 @@ public class NodeInfo extends BaseNodeResponse {
        this.transport = transport;
        this.http = http;
        this.plugins = plugins;
        this.ingest = ingest;
    }

    /**

@@ -176,6 +181,11 @@ public class NodeInfo extends BaseNodeResponse {
        return this.plugins;
    }

    @Nullable
    public IngestInfo getIngest() {
        return ingest;
    }

    public static NodeInfo readNodeInfo(StreamInput in) throws IOException {
        NodeInfo nodeInfo = new NodeInfo();
        nodeInfo.readFrom(in);

@@ -220,6 +230,10 @@ public class NodeInfo extends BaseNodeResponse {
            plugins = new PluginsAndModules();
            plugins.readFrom(in);
        }
        if (in.readBoolean()) {
            ingest = new IngestInfo();
            ingest.readFrom(in);
        }
    }

    @Override

@@ -285,5 +299,11 @@ public class NodeInfo extends BaseNodeResponse {
            out.writeBoolean(true);
            plugins.writeTo(out);
        }
        if (ingest == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            ingest.writeTo(out);
        }
    }
}
@@ -38,6 +38,7 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
    private boolean transport = true;
    private boolean http = true;
    private boolean plugins = true;
    private boolean ingest = true;

    public NodesInfoRequest() {
    }

@@ -62,6 +63,7 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
        transport = false;
        http = false;
        plugins = false;
        ingest = false;
        return this;
    }

@@ -77,6 +79,7 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
        transport = true;
        http = true;
        plugins = true;
        ingest = true;
        return this;
    }

@@ -202,6 +205,22 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
        return plugins;
    }

    /**
     * Should information about ingest be returned
     * @param ingest true if you want info
     */
    public NodesInfoRequest ingest(boolean ingest) {
        this.ingest = ingest;
        return this;
    }

    /**
     * @return true if information about ingest is requested
     */
    public boolean ingest() {
        return ingest;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);

@@ -213,6 +232,7 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
        transport = in.readBoolean();
        http = in.readBoolean();
        plugins = in.readBoolean();
        ingest = in.readBoolean();
    }

    @Override

@@ -226,5 +246,6 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
        out.writeBoolean(transport);
        out.writeBoolean(http);
        out.writeBoolean(plugins);
        out.writeBoolean(ingest);
    }
}

@@ -110,4 +110,12 @@ public class NodesInfoRequestBuilder extends NodesOperationRequestBuilder<NodesI
        request().plugins(plugins);
        return this;
    }

    /**
     * Should the node ingest info be returned.
     */
    public NodesInfoRequestBuilder setIngest(boolean ingest) {
        request().ingest(ingest);
        return this;
    }
}
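With the flag wired through the request, the builder, and both serialization methods, a caller can ask for just the ingest section. A usage sketch (the surrounding client wiring is assumed):

    // Sketch: request only the new ingest section from every node.
    NodesInfoRequest request = new NodesInfoRequest();
    request.clear();      // switch off the defaults (settings, os, process, ...)
    request.ingest(true); // re-enable just the ingest info
    // Or, via the builder on an assumed `client` handle:
    // client.admin().cluster().prepareNodesInfo().clear().setIngest(true).get();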
@@ -121,6 +121,9 @@ public class NodesInfoResponse extends BaseNodesResponse<NodeInfo> implements To
        if (nodeInfo.getPlugins() != null) {
            nodeInfo.getPlugins().toXContent(builder, params);
        }
        if (nodeInfo.getIngest() != null) {
            nodeInfo.getIngest().toXContent(builder, params);
        }

        builder.endObject();
    }

@@ -80,7 +80,7 @@ public class TransportNodesInfoAction extends TransportNodesAction<NodesInfoRequ
    protected NodeInfo nodeOperation(NodeInfoRequest nodeRequest) {
        NodesInfoRequest request = nodeRequest.request;
        return nodeService.info(request.settings(), request.os(), request.process(), request.jvm(), request.threadPool(),
                request.transport(), request.http(), request.plugins());
                request.transport(), request.http(), request.plugins(), request.ingest());
    }

    @Override

@@ -95,7 +95,7 @@ public class TransportNodesInfoAction extends TransportNodesAction<NodesInfoRequ
        public NodeInfoRequest() {
        }

        NodeInfoRequest(String nodeId, NodesInfoRequest request) {
        public NodeInfoRequest(String nodeId, NodesInfoRequest request) {
            super(nodeId);
            this.request = request;
        }
@@ -53,12 +53,18 @@ public class CancelTasksRequest extends BaseTasksRequest<CancelTasksRequest> {
        return super.match(task) && task instanceof CancellableTask;
    }

    public CancelTasksRequest reason(String reason) {
    /**
     * Set the reason for canceling the task.
     */
    public CancelTasksRequest setReason(String reason) {
        this.reason = reason;
        return this;
    }

    public String reason() {
    /**
     * The reason for canceling the task.
     */
    public String getReason() {
        return reason;
    }
}
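The rename to getter/setter style matches the convention adopted for the other task requests in this change. A one-line usage sketch (the reason text is a hypothetical example):

    CancelTasksRequest cancel = new CancelTasksRequest().setReason("rebalancing, please retry");
    assert "rebalancing, please retry".equals(cancel.getReason());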
@@ -84,21 +84,21 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
    }

    protected void processTasks(CancelTasksRequest request, Consumer<CancellableTask> operation) {
        if (request.taskId().isSet() == false) {
        if (request.getTaskId().isSet() == false) {
            // we are only checking one task, we can optimize it
            CancellableTask task = taskManager.getCancellableTask(request.taskId().getId());
            CancellableTask task = taskManager.getCancellableTask(request.getTaskId().getId());
            if (task != null) {
                if (request.match(task)) {
                    operation.accept(task);
                } else {
                    throw new IllegalArgumentException("task [" + request.taskId() + "] doesn't support this operation");
                    throw new IllegalArgumentException("task [" + request.getTaskId() + "] doesn't support this operation");
                }
            } else {
                if (taskManager.getTask(request.taskId().getId()) != null) {
                if (taskManager.getTask(request.getTaskId().getId()) != null) {
                    // The task exists, but doesn't support cancellation
                    throw new IllegalArgumentException("task [" + request.taskId() + "] doesn't support cancellation");
                    throw new IllegalArgumentException("task [" + request.getTaskId() + "] doesn't support cancellation");
                } else {
                    throw new ResourceNotFoundException("task [{}] doesn't support cancellation", request.taskId());
                    throw new ResourceNotFoundException("task [{}] doesn't support cancellation", request.getTaskId());
                }
            }
        } else {

@@ -113,14 +113,14 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
    @Override
    protected synchronized TaskInfo taskOperation(CancelTasksRequest request, CancellableTask cancellableTask) {
        final BanLock banLock = new BanLock(nodes -> removeBanOnNodes(cancellableTask, nodes));
        Set<String> childNodes = taskManager.cancel(cancellableTask, request.reason(), banLock::onTaskFinished);
        Set<String> childNodes = taskManager.cancel(cancellableTask, request.getReason(), banLock::onTaskFinished);
        if (childNodes != null) {
            if (childNodes.isEmpty()) {
                logger.trace("cancelling task {} with no children", cancellableTask.getId());
                return cancellableTask.taskInfo(clusterService.localNode(), false);
            } else {
                logger.trace("cancelling task {} with children on nodes [{}]", cancellableTask.getId(), childNodes);
                setBanOnNodes(request.reason(), cancellableTask, childNodes, banLock);
                setBanOnNodes(request.getReason(), cancellableTask, childNodes, banLock);
                return cancellableTask.taskInfo(clusterService.localNode(), false);
            }
        } else {
@@ -31,31 +31,49 @@ import java.io.IOException;
public class ListTasksRequest extends BaseTasksRequest<ListTasksRequest> {

    private boolean detailed = false;
    private boolean waitForCompletion = false;

    /**
     * Should the detailed task information be returned.
     */
    public boolean detailed() {
    public boolean getDetailed() {
        return this.detailed;
    }

    /**
     * Should the detailed task information be returned.
     */
    public ListTasksRequest detailed(boolean detailed) {
    public ListTasksRequest setDetailed(boolean detailed) {
        this.detailed = detailed;
        return this;
    }

    /**
     * Should this request wait for all found tasks to complete?
     */
    public boolean getWaitForCompletion() {
        return waitForCompletion;
    }

    /**
     * Should this request wait for all found tasks to complete?
     */
    public ListTasksRequest setWaitForCompletion(boolean waitForCompletion) {
        this.waitForCompletion = waitForCompletion;
        return this;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        detailed = in.readBoolean();
        waitForCompletion = in.readBoolean();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeBoolean(detailed);
        out.writeBoolean(waitForCompletion);
    }
}
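Besides the getter/setter renames, the request gains a waitForCompletion flag that is serialized alongside detailed. A usage sketch combining both:

    // Sketch: ask for detailed task info and block until matched tasks finish.
    ListTasksRequest listTasks = new ListTasksRequest()
            .setDetailed(true)            // include per-task descriptions
            .setWaitForCompletion(true);  // intercept tasks and wait for them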
@@ -35,7 +35,15 @@ public class ListTasksRequestBuilder extends TasksRequestBuilder<ListTasksReques
     * Should detailed task information be returned.
     */
    public ListTasksRequestBuilder setDetailed(boolean detailed) {
        request.detailed(detailed);
        request.setDetailed(detailed);
        return this;
    }

    /**
     * Should this request wait for all found tasks to complete?
     */
    public final ListTasksRequestBuilder setWaitForCompletion(boolean waitForCompletion) {
        request.setWaitForCompletion(waitForCompletion);
        return this;
    }
}
@@ -29,6 +29,7 @@ import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;

import java.io.IOException;
import java.util.concurrent.TimeUnit;

/**
 * Information about a currently running task.

@@ -50,17 +51,24 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {

    private final String description;

    private final long startTime;

    private final long runningTimeNanos;

    private final Task.Status status;

    private final TaskId parentTaskId;

    public TaskInfo(DiscoveryNode node, long id, String type, String action, String description, Task.Status status, TaskId parentTaskId) {
    public TaskInfo(DiscoveryNode node, long id, String type, String action, String description, Task.Status status, long startTime,
                    long runningTimeNanos, TaskId parentTaskId) {
        this.node = node;
        this.taskId = new TaskId(node.getId(), id);
        this.type = type;
        this.action = action;
        this.description = description;
        this.status = status;
        this.startTime = startTime;
        this.runningTimeNanos = runningTimeNanos;
        this.parentTaskId = parentTaskId;
    }

@@ -75,6 +83,8 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
        } else {
            status = null;
        }
        startTime = in.readLong();
        runningTimeNanos = in.readLong();
        parentTaskId = new TaskId(in);
    }

@@ -110,6 +120,23 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
        return status;
    }

    /**
     * Returns the task start time
     */
    public long getStartTime() {
        return startTime;
    }

    /**
     * Returns the task running time
     */
    public long getRunningTimeNanos() {
        return runningTimeNanos;
    }

    /**
     * Returns the parent task id
     */
    public TaskId getParentTaskId() {
        return parentTaskId;
    }

@@ -132,6 +159,8 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
        } else {
            out.writeBoolean(false);
        }
        out.writeLong(startTime);
        out.writeLong(runningTimeNanos);
        parentTaskId.writeTo(out);
    }

@@ -147,6 +176,8 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
        if (description != null) {
            builder.field("description", description);
        }
        builder.dateValueField("start_time_in_millis", "start_time", startTime);
        builder.timeValueField("running_time_in_nanos", "running_time", runningTimeNanos, TimeUnit.NANOSECONDS);
        if (parentTaskId.isSet() == false) {
            builder.field("parent_task_id", parentTaskId.toString());
        }
@@ -19,6 +19,8 @@

package org.elasticsearch.action.admin.cluster.node.tasks.list;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.support.ActionFilters;

@@ -29,18 +31,24 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.function.Consumer;

import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;

/**
 *
 */
public class TransportListTasksAction extends TransportTasksAction<Task, ListTasksRequest, ListTasksResponse, TaskInfo> {
    private static final TimeValue WAIT_FOR_COMPLETION_POLL = timeValueMillis(100);
    private static final TimeValue DEFAULT_WAIT_FOR_COMPLETION_TIMEOUT = timeValueSeconds(30);

    @Inject
    public TransportListTasksAction(Settings settings, ClusterName clusterName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {

@@ -59,7 +67,34 @@ public class TransportListTasksAction extends TransportTasksAction<Task, ListTas

    @Override
    protected TaskInfo taskOperation(ListTasksRequest request, Task task) {
        return task.taskInfo(clusterService.localNode(), request.detailed());
        return task.taskInfo(clusterService.localNode(), request.getDetailed());
    }

    @Override
    protected void processTasks(ListTasksRequest request, Consumer<Task> operation) {
        if (false == request.getWaitForCompletion()) {
            super.processTasks(request, operation);
            return;
        }
        // If we should wait for completion then we have to intercept every found task and wait for it to leave the manager.
        TimeValue timeout = request.getTimeout();
        if (timeout == null) {
            timeout = DEFAULT_WAIT_FOR_COMPLETION_TIMEOUT;
        }
        long timeoutTime = System.nanoTime() + timeout.nanos();
        super.processTasks(request, operation.andThen((Task t) -> {
            while (System.nanoTime() - timeoutTime < 0) {
                if (taskManager.getTask(t.getId()) == null) {
                    return;
                }
                try {
                    Thread.sleep(WAIT_FOR_COMPLETION_POLL.millis());
                } catch (InterruptedException e) {
                    throw new ElasticsearchException("Interrupted waiting for completion of [{}]", e, t);
                }
            }
            throw new ElasticsearchTimeoutException("Timed out waiting for completion of [{}]", t);
        }));
    }

    @Override
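processTasks chains the wait onto each matched task via Consumer.andThen, polling the task manager every 100ms until the task disappears or a deadline (the request timeout, defaulting to 30s) passes; note the overflow-safe deadline comparison on System.nanoTime(). The same pattern in isolation (a sketch; the task-manager lookup is replaced by a hypothetical probe):

    import java.util.concurrent.TimeUnit;
    import java.util.function.BooleanSupplier;

    final class PollUntil {
        /**
         * Poll isDone until it returns true or timeoutMillis elapses.
         * Returns false on timeout. Mirrors the wait loop above.
         */
        static boolean pollUntil(BooleanSupplier isDone, long timeoutMillis, long pollMillis)
                throws InterruptedException {
            long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMillis);
            // nanoTime() values may wrap, so compare differences, never absolutes.
            while (System.nanoTime() - deadline < 0) {
                if (isDone.getAsBoolean()) {
                    return true;
                }
                Thread.sleep(pollMillis);
            }
            return isDone.getAsBoolean();
        }
    }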
@@ -98,7 +98,7 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta

    @Override
    protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeRequest) {
        NodeInfo nodeInfo = nodeService.info(false, true, false, true, false, true, false, true);
        NodeInfo nodeInfo = nodeService.info(false, true, false, true, false, true, false, true, false);
        NodeStats nodeStats = nodeService.stats(CommonStatsFlags.NONE, false, true, true, false, true, false, false, false, false, false);
        List<ShardStats> shardsStats = new ArrayList<>();
        for (IndexService indexService : indicesService) {

@@ -166,7 +166,7 @@ public class CommonStats implements Streamable, ToXContent {
                    completion = indexShard.completionStats(flags.completionDataFields());
                    break;
                case Segments:
                    segments = indexShard.segmentStats();
                    segments = indexShard.segmentStats(flags.includeSegmentFileSizes());
                    break;
                case Percolate:
                    percolate = indexShard.percolateStats();
@@ -19,6 +19,7 @@

package org.elasticsearch.action.admin.indices.stats;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;

@@ -38,6 +39,7 @@ public class CommonStatsFlags implements Streamable, Cloneable {
    private String[] groups = null;
    private String[] fieldDataFields = null;
    private String[] completionDataFields = null;
    private boolean includeSegmentFileSizes = false;


    /**

@@ -62,6 +64,7 @@ public class CommonStatsFlags implements Streamable, Cloneable {
        groups = null;
        fieldDataFields = null;
        completionDataFields = null;
        includeSegmentFileSizes = false;
        return this;
    }

@@ -74,6 +77,7 @@ public class CommonStatsFlags implements Streamable, Cloneable {
        groups = null;
        fieldDataFields = null;
        completionDataFields = null;
        includeSegmentFileSizes = false;
        return this;
    }

@@ -137,6 +141,15 @@ public class CommonStatsFlags implements Streamable, Cloneable {
        return this.completionDataFields;
    }

    public CommonStatsFlags includeSegmentFileSizes(boolean includeSegmentFileSizes) {
        this.includeSegmentFileSizes = includeSegmentFileSizes;
        return this;
    }

    public boolean includeSegmentFileSizes() {
        return this.includeSegmentFileSizes;
    }

    public boolean isSet(Flag flag) {
        return flags.contains(flag);
    }

@@ -177,6 +190,9 @@ public class CommonStatsFlags implements Streamable, Cloneable {
        out.writeStringArrayNullable(groups);
        out.writeStringArrayNullable(fieldDataFields);
        out.writeStringArrayNullable(completionDataFields);
        if (out.getVersion().onOrAfter(Version.V_5_0_0)) {
            out.writeBoolean(includeSegmentFileSizes);
        }
    }

    @Override

@@ -192,6 +208,11 @@ public class CommonStatsFlags implements Streamable, Cloneable {
        groups = in.readStringArray();
        fieldDataFields = in.readStringArray();
        completionDataFields = in.readStringArray();
        if (in.getVersion().onOrAfter(Version.V_5_0_0)) {
            includeSegmentFileSizes = in.readBoolean();
        } else {
            includeSegmentFileSizes = false;
        }
    }

    @Override
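The stream reads and writes gate the new boolean on Version.V_5_0_0: the writer only emits the field to peers that can parse it, and the reader substitutes the pre-5.0 default when the sender is older. This is the standard recipe for adding a field without breaking mixed-version clusters; schematically, both sides must mirror the same condition:

    // Sketch of the wire-compat recipe used above (field and version from this diff).
    // Writer: only 5.0.0+ peers know the extra boolean exists.
    if (out.getVersion().onOrAfter(Version.V_5_0_0)) {
        out.writeBoolean(includeSegmentFileSizes);
    }
    // Reader: mirror the writer exactly, defaulting for older peers.
    if (in.getVersion().onOrAfter(Version.V_5_0_0)) {
        includeSegmentFileSizes = in.readBoolean();
    } else {
        includeSegmentFileSizes = false; // pre-5.0 behaviour
    }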
@@ -265,6 +265,15 @@ public class IndicesStatsRequest extends BroadcastRequest<IndicesStatsRequest> {
        return flags.isSet(Flag.Recovery);
    }

    public boolean includeSegmentFileSizes() {
        return flags.includeSegmentFileSizes();
    }

    public IndicesStatsRequest includeSegmentFileSizes(boolean includeSegmentFileSizes) {
        flags.includeSegmentFileSizes(includeSegmentFileSizes);
        return this;
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);

@@ -166,4 +166,9 @@ public class IndicesStatsRequestBuilder extends BroadcastOperationRequestBuilder
        request.recovery(recovery);
        return this;
    }

    public IndicesStatsRequestBuilder setIncludeSegmentFileSizes(boolean includeSegmentFileSizes) {
        request.includeSegmentFileSizes(includeSegmentFileSizes);
        return this;
    }
}
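End to end, the flag travels builder to request to CommonStatsFlags to the shard level. A caller-side sketch (the `client` handle and index name are assumptions):

    // Sketch: request segment stats including per-file sizes for one index.
    IndicesStatsRequestBuilder statsRequest = client.admin().indices()
            .prepareStats("my-index")            // hypothetical index name
            .setSegments(true)                   // enable the Segments flag
            .setIncludeSegmentFileSizes(true);   // new in this change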
@@ -144,6 +144,7 @@ public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction<
        }
        if (request.segments()) {
            flags.set(CommonStatsFlags.Flag.Segments);
            flags.includeSegmentFileSizes(request.includeSegmentFileSizes());
        }
        if (request.completion()) {
            flags.set(CommonStatsFlags.Flag.Completion);
@@ -28,7 +28,9 @@ import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.StatusToXContent;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.rest.RestStatus;

@@ -76,7 +78,15 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
    /**
     * Represents a failure.
     */
    public static class Failure {
    public static class Failure implements Writeable<Failure>, ToXContent {
        static final String INDEX_FIELD = "index";
        static final String TYPE_FIELD = "type";
        static final String ID_FIELD = "id";
        static final String CAUSE_FIELD = "cause";
        static final String STATUS_FIELD = "status";

        public static final Failure PROTOTYPE = new Failure(null, null, null, null);

        private final String index;
        private final String type;
        private final String id;

@@ -126,9 +136,39 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
            return this.status;
        }

        /**
         * The actual cause of the failure.
         */
        public Throwable getCause() {
            return cause;
        }

        @Override
        public Failure readFrom(StreamInput in) throws IOException {
            return new Failure(in.readString(), in.readString(), in.readOptionalString(), in.readThrowable());
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeString(getIndex());
            out.writeString(getType());
            out.writeOptionalString(getId());
            out.writeThrowable(getCause());
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            builder.field(INDEX_FIELD, index);
            builder.field(TYPE_FIELD, type);
            if (id != null) {
                builder.field(ID_FIELD, id);
            }
            builder.startObject(CAUSE_FIELD);
            ElasticsearchException.toXContent(builder, params, cause);
            builder.endObject();
            builder.field(STATUS_FIELD, status.getStatus());
            return builder;
        }
    }

    private int id;

@@ -265,11 +305,7 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
        }

        if (in.readBoolean()) {
            String fIndex = in.readString();
            String fType = in.readString();
            String fId = in.readOptionalString();
            Throwable throwable = in.readThrowable();
            failure = new Failure(fIndex, fType, fId, throwable);
            failure = Failure.PROTOTYPE.readFrom(in);
        }
    }

@@ -294,10 +330,7 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            out.writeString(failure.getIndex());
            out.writeString(failure.getType());
            out.writeOptionalString(failure.getId());
            out.writeThrowable(failure.getCause());
            failure.writeTo(out);
        }
    }
}
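Failure now owns its serialization instead of having BulkItemResponse spell out the fields, following the prototype pattern used elsewhere in this codebase: a shared PROTOTYPE instance whose readFrom manufactures new immutable instances. The call sites above reduce to:

    // Sketch of the prototype-based round trip, as used by BulkItemResponse.
    // Reading: delegate to the prototype rather than reading field-by-field.
    if (in.readBoolean()) {
        failure = BulkItemResponse.Failure.PROTOTYPE.readFrom(in);
    }
    // Writing: the Failure serializes itself.
    out.writeBoolean(true);
    failure.writeTo(out);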
@@ -35,31 +35,53 @@ import java.util.Iterator;
 */
public class BulkResponse extends ActionResponse implements Iterable<BulkItemResponse> {

    public final static long NO_INGEST_TOOK = -1L;

    private BulkItemResponse[] responses;
    private long tookInMillis;
    private long ingestTookInMillis;

    BulkResponse() {
    }

    public BulkResponse(BulkItemResponse[] responses, long tookInMillis) {
        this(responses, tookInMillis, NO_INGEST_TOOK);
    }

    public BulkResponse(BulkItemResponse[] responses, long tookInMillis, long ingestTookInMillis) {
        this.responses = responses;
        this.tookInMillis = tookInMillis;
        this.ingestTookInMillis = ingestTookInMillis;
    }

    /**
     * How long the bulk execution took.
     * How long the bulk execution took, excluding ingest preprocessing.
     */
    public TimeValue getTook() {
        return new TimeValue(tookInMillis);
    }

    /**
     * How long the bulk execution took in milliseconds.
     * How long the bulk execution took in milliseconds, excluding ingest preprocessing.
     */
    public long getTookInMillis() {
        return tookInMillis;
    }

    /**
     * If ingest is enabled returns the bulk ingest preprocessing time, otherwise 0 is returned.
     */
    public TimeValue getIngestTook() {
        return new TimeValue(ingestTookInMillis);
    }

    /**
     * If ingest is enabled returns the bulk ingest preprocessing time in milliseconds, otherwise -1 is returned.
     */
    public long getIngestTookInMillis() {
        return ingestTookInMillis;
    }

    /**
     * Has anything failed with the execution.
     */

@@ -106,6 +128,7 @@ public class BulkResponse extends ActionResponse implements Iterable<BulkItemRes
            responses[i] = BulkItemResponse.readBulkItem(in);
        }
        tookInMillis = in.readVLong();
        ingestTookInMillis = in.readZLong();
    }

    @Override

@@ -116,5 +139,6 @@ public class BulkResponse extends ActionResponse implements Iterable<BulkItemRes
            response.writeTo(out);
        }
        out.writeVLong(tookInMillis);
        out.writeZLong(ingestTookInMillis);
    }
}
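Callers can now split the reported latency into ingest preprocessing and bulk execution proper, with NO_INGEST_TOOK (-1) marking responses that never passed through an ingest pipeline. A consumer-side sketch:

    // Sketch: report both timings from a BulkResponse.
    static void logBulkTimings(BulkResponse response) {
        if (response.getIngestTookInMillis() != BulkResponse.NO_INGEST_TOOK) {
            System.out.println("ingest preprocessing took " + response.getIngestTook());
        }
        System.out.println("bulk execution took " + response.getTook());
    }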
@@ -94,6 +94,12 @@ public class BulkShardRequest extends ReplicationRequest<BulkShardRequest> {

    @Override
    public String toString() {
        return "shard bulk {" + super.toString() + "}";
        // This is included in error messages so we'll try to make it somewhat user friendly.
        StringBuilder b = new StringBuilder("BulkShardRequest to [");
        b.append(index).append("] containing [").append(items.length).append("] requests");
        if (refresh) {
            b.append(" and a refresh");
        }
        return b.toString();
    }
}
@ -38,7 +38,7 @@ import java.util.function.Predicate;
|
|||
/**
|
||||
* Encapsulates synchronous and asynchronous retry logic.
|
||||
*/
|
||||
class Retry {
|
||||
public class Retry {
|
||||
private final Class<? extends Throwable> retryOnThrowable;
|
||||
|
||||
private BackoffPolicy backoffPolicy;
|
||||
|
|
|
@@ -30,10 +30,12 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.TransportDeleteAction;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.AutoCreateIndex;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.action.update.TransportUpdateAction;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;

@@ -42,12 +44,9 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndexAlreadyExistsException;

@@ -61,8 +60,11 @@ import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.LongSupplier;

/**
 *

@@ -74,27 +76,41 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
    private final ClusterService clusterService;
    private final TransportShardBulkAction shardBulkAction;
    private final TransportCreateIndexAction createIndexAction;
    private final LongSupplier relativeTimeProvider;

    @Inject
    public TransportBulkAction(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterService clusterService,
                               TransportShardBulkAction shardBulkAction, TransportCreateIndexAction createIndexAction,
                               ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
                               AutoCreateIndex autoCreateIndex) {
        this(settings, threadPool, transportService, clusterService,
                shardBulkAction, createIndexAction,
                actionFilters, indexNameExpressionResolver,
                autoCreateIndex,
                System::nanoTime);
    }

    public TransportBulkAction(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterService clusterService,
                               TransportShardBulkAction shardBulkAction, TransportCreateIndexAction createIndexAction,
                               ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
                               AutoCreateIndex autoCreateIndex, LongSupplier relativeTimeProvider) {
        super(settings, BulkAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, BulkRequest::new);
        Objects.requireNonNull(relativeTimeProvider);
        this.clusterService = clusterService;
        this.shardBulkAction = shardBulkAction;
        this.createIndexAction = createIndexAction;

        this.autoCreateIndex = autoCreateIndex;
        this.allowIdGeneration = this.settings.getAsBoolean("action.bulk.action.allow_id_generation", true);
        this.relativeTimeProvider = relativeTimeProvider;
    }

    @Override
    protected void doExecute(final BulkRequest bulkRequest, final ActionListener<BulkResponse> listener) {
        final long startTime = System.currentTimeMillis();
        final long startTime = relativeTime();
        final AtomicArray<BulkItemResponse> responses = new AtomicArray<>(bulkRequest.requests.size());

        if (autoCreateIndex.needToCheck()) {
        if (needToCheck()) {
            // Keep track of all unique indices and all unique types per index for the create index requests:
            final Map<String, Set<String>> indicesAndTypes = new HashMap<>();
            for (ActionRequest request : bulkRequest.requests) {

@@ -113,7 +129,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
            ClusterState state = clusterService.state();
            for (Map.Entry<String, Set<String>> entry : indicesAndTypes.entrySet()) {
                final String index = entry.getKey();
                if (autoCreateIndex.shouldAutoCreate(index, state)) {
                if (shouldAutoCreate(index, state)) {
                    CreateIndexRequest createIndexRequest = new CreateIndexRequest();
                    createIndexRequest.index(index);
                    for (String type : entry.getValue()) {

@@ -164,6 +180,14 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
        }
    }

    boolean needToCheck() {
        return autoCreateIndex.needToCheck();
    }

    boolean shouldAutoCreate(String index, ClusterState state) {
        return autoCreateIndex.shouldAutoCreate(index, state);
    }

    private boolean setResponseFailureIfIndexMatches(AtomicArray<BulkItemResponse> responses, int idx, ActionRequest request, String index, Throwable e) {
        if (request instanceof IndexRequest) {
            IndexRequest indexRequest = (IndexRequest) request;

@@ -196,16 +220,15 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
     * @see #doExecute(BulkRequest, org.elasticsearch.action.ActionListener)
     */
    public void executeBulk(final BulkRequest bulkRequest, final ActionListener<BulkResponse> listener) {
        final long startTime = System.currentTimeMillis();
        executeBulk(bulkRequest, startTime, listener, new AtomicArray<BulkItemResponse>(bulkRequest.requests.size()));
        final long startTimeNanos = relativeTime();
        executeBulk(bulkRequest, startTimeNanos, listener, new AtomicArray<>(bulkRequest.requests.size()));
    }

    private final long buildTookInMillis(long startTime) {
        // protect ourselves against time going backwards
        return Math.max(1, System.currentTimeMillis() - startTime);
    private long buildTookInMillis(long startTimeNanos) {
        return TimeUnit.NANOSECONDS.toMillis(relativeTime() - startTimeNanos);
    }

    private void executeBulk(final BulkRequest bulkRequest, final long startTime, final ActionListener<BulkResponse> listener, final AtomicArray<BulkItemResponse> responses) {
    void executeBulk(final BulkRequest bulkRequest, final long startTimeNanos, final ActionListener<BulkResponse> listener, final AtomicArray<BulkItemResponse> responses) {
        final ClusterState clusterState = clusterService.state();
        // TODO use timeout to wait here if its blocked...
        clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.WRITE);
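Switching the took measurement from System.currentTimeMillis() to an injected relative-nanos supplier removes the old "time going backwards" clamp (a monotonic clock cannot regress) and lets tests substitute a deterministic clock. The shape of the pattern, extracted (hypothetical class; System::nanoTime is the production supplier, as in the constructor above):

    import java.util.concurrent.TimeUnit;
    import java.util.function.LongSupplier;

    final class TookClock {
        private final LongSupplier relativeTimeProvider; // System::nanoTime in production

        TookClock(LongSupplier relativeTimeProvider) {
            this.relativeTimeProvider = relativeTimeProvider;
        }

        long startNanos() {
            return relativeTimeProvider.getAsLong();
        }

        long tookInMillis(long startNanos) {
            // Monotonic difference: no Math.max(1, ...) guard needed, unlike wall-clock time.
            return TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startNanos);
        }
    }

    // In a test, a deterministic supplier makes took values exact, e.g.:
    // TookClock clock = new TookClock(new java.util.concurrent.atomic.AtomicLong()::incrementAndGet);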
|
@ -214,33 +237,53 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
|
|||
MetaData metaData = clusterState.metaData();
|
||||
for (int i = 0; i < bulkRequest.requests.size(); i++) {
|
||||
ActionRequest request = bulkRequest.requests.get(i);
|
||||
if (request instanceof DocumentRequest) {
|
||||
DocumentRequest req = (DocumentRequest) request;
|
||||
|
||||
if (addFailureIfIndexIsUnavailable(req, bulkRequest, responses, i, concreteIndices, metaData)) {
|
||||
continue;
|
||||
//the request can only be null because we set it to null in the previous step, so it gets ignored
|
||||
if (request == null) {
|
||||
continue;
|
||||
}
|
||||
DocumentRequest documentRequest = (DocumentRequest) request;
|
||||
if (addFailureIfIndexIsUnavailable(documentRequest, bulkRequest, responses, i, concreteIndices, metaData)) {
|
||||
continue;
|
||||
}
|
||||
String concreteIndex = concreteIndices.resolveIfAbsent(documentRequest);
|
||||
if (request instanceof IndexRequest) {
|
||||
IndexRequest indexRequest = (IndexRequest) request;
|
||||
MappingMetaData mappingMd = null;
|
||||
if (metaData.hasIndex(concreteIndex)) {
|
||||
mappingMd = metaData.index(concreteIndex).mappingOrDefault(indexRequest.type());
|
||||
}
|
||||
try {
|
||||
indexRequest.process(metaData, mappingMd, allowIdGeneration, concreteIndex);
|
||||
} catch (ElasticsearchParseException | RoutingMissingException e) {
|
||||
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex, indexRequest.type(), indexRequest.id(), e);
|
||||
BulkItemResponse bulkItemResponse = new BulkItemResponse(i, "index", failure);
|
||||
responses.set(i, bulkItemResponse);
|
||||
// make sure the request gets never processed again
|
||||
bulkRequest.requests.set(i, null);
|
||||
}
|
||||
} else if (request instanceof DeleteRequest) {
|
||||
try {
|
||||
TransportDeleteAction.resolveAndValidateRouting(metaData, concreteIndex, (DeleteRequest)request);
|
||||
} catch(RoutingMissingException e) {
|
||||
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex, documentRequest.type(), documentRequest.id(), e);
|
||||
BulkItemResponse bulkItemResponse = new BulkItemResponse(i, "delete", failure);
|
||||
responses.set(i, bulkItemResponse);
|
||||
// make sure the request gets never processed again
|
||||
bulkRequest.requests.set(i, null);
|
||||
}
|
||||
|
||||
String concreteIndex = concreteIndices.resolveIfAbsent(req);
|
||||
if (request instanceof IndexRequest) {
|
||||
IndexRequest indexRequest = (IndexRequest) request;
|
||||
MappingMetaData mappingMd = null;
|
||||
if (metaData.hasIndex(concreteIndex)) {
|
||||
mappingMd = metaData.index(concreteIndex).mappingOrDefault(indexRequest.type());
|
||||
}
|
||||
try {
|
||||
indexRequest.process(metaData, mappingMd, allowIdGeneration, concreteIndex);
|
||||
} catch (ElasticsearchParseException | RoutingMissingException e) {
|
||||
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex, indexRequest.type(), indexRequest.id(), e);
|
||||
BulkItemResponse bulkItemResponse = new BulkItemResponse(i, "index", failure);
|
||||
responses.set(i, bulkItemResponse);
|
||||
// make sure the request gets never processed again
|
||||
bulkRequest.requests.set(i, null);
|
||||
}
|
||||
} else {
|
||||
concreteIndices.resolveIfAbsent(req);
|
||||
req.routing(clusterState.metaData().resolveIndexRouting(req.parent(), req.routing(), req.index()));
|
||||
} else if (request instanceof UpdateRequest) {
|
||||
try {
|
||||
TransportUpdateAction.resolveAndValidateRouting(metaData, concreteIndex, (UpdateRequest)request);
|
||||
} catch(RoutingMissingException e) {
|
||||
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex, documentRequest.type(), documentRequest.id(), e);
|
||||
BulkItemResponse bulkItemResponse = new BulkItemResponse(i, "update", failure);
|
||||
responses.set(i, bulkItemResponse);
|
||||
// make sure the request gets never processed again
|
||||
bulkRequest.requests.set(i, null);
|
||||
}
|
||||
} else {
|
||||
throw new AssertionError("request type not supported: [" + request.getClass().getName() + "]");
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -262,37 +305,16 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
|
|||
} else if (request instanceof DeleteRequest) {
|
||||
DeleteRequest deleteRequest = (DeleteRequest) request;
|
||||
String concreteIndex = concreteIndices.getConcreteIndex(deleteRequest.index());
|
||||
MappingMetaData mappingMd = clusterState.metaData().index(concreteIndex).mappingOrDefault(deleteRequest.type());
|
||||
if (mappingMd != null && mappingMd.routing().required() && deleteRequest.routing() == null) {
|
||||
// if routing is required, and no routing on the delete request, we need to broadcast it....
|
||||
GroupShardsIterator groupShards = clusterService.operationRouting().broadcastDeleteShards(clusterState, concreteIndex);
|
||||
for (ShardIterator shardIt : groupShards) {
|
||||
List<BulkItemRequest> list = requestsByShard.get(shardIt.shardId());
|
||||
if (list == null) {
|
||||
list = new ArrayList<>();
|
||||
requestsByShard.put(shardIt.shardId(), list);
|
||||
}
|
||||
list.add(new BulkItemRequest(i, deleteRequest));
|
||||
}
|
||||
} else {
|
||||
ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, deleteRequest.type(), deleteRequest.id(), deleteRequest.routing()).shardId();
|
||||
List<BulkItemRequest> list = requestsByShard.get(shardId);
|
||||
if (list == null) {
|
||||
list = new ArrayList<>();
|
||||
requestsByShard.put(shardId, list);
|
||||
}
|
||||
list.add(new BulkItemRequest(i, request));
|
||||
ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, deleteRequest.type(), deleteRequest.id(), deleteRequest.routing()).shardId();
|
||||
List<BulkItemRequest> list = requestsByShard.get(shardId);
|
||||
if (list == null) {
|
||||
list = new ArrayList<>();
|
||||
requestsByShard.put(shardId, list);
|
||||
}
|
||||
list.add(new BulkItemRequest(i, request));
|
||||
} else if (request instanceof UpdateRequest) {
|
||||
UpdateRequest updateRequest = (UpdateRequest) request;
|
||||
String concreteIndex = concreteIndices.getConcreteIndex(updateRequest.index());
|
||||
MappingMetaData mappingMd = clusterState.metaData().index(concreteIndex).mappingOrDefault(updateRequest.type());
|
||||
if (mappingMd != null && mappingMd.routing().required() && updateRequest.routing() == null) {
|
||||
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(updateRequest.index(), updateRequest.type(),
|
||||
updateRequest.id(), new IllegalArgumentException("routing is required for this item"));
|
||||
responses.set(i, new BulkItemResponse(i, updateRequest.type(), failure));
|
||||
continue;
|
||||
}
|
||||
ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, updateRequest.type(), updateRequest.id(), updateRequest.routing()).shardId();
|
||||
List<BulkItemRequest> list = requestsByShard.get(shardId);
|
||||
if (list == null) {
|
||||
|
@ -304,7 +326,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
|
|||
}
|
||||
|
||||
if (requestsByShard.isEmpty()) {
|
||||
listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTime)));
|
||||
listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTimeNanos)));
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -354,7 +376,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
|
|||
}
|
||||
|
||||
private void finishHim() {
|
||||
listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTime)));
|
||||
listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTimeNanos)));
|
||||
}
|
||||
});
|
||||
}
|
||||
|
@ -400,7 +422,6 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
|
|||
return false;
|
||||
}
|
||||
|
||||
|
||||
private static class ConcreteIndices {
|
||||
private final ClusterState state;
|
||||
private final IndexNameExpressionResolver indexNameExpressionResolver;
|
||||
|
@ -424,4 +445,9 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
|
|||
return concreteIndex;
|
||||
}
|
||||
}
|
||||
|
||||
private long relativeTime() {
|
||||
return relativeTimeProvider.getAsLong();
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@@ -96,23 +96,27 @@ public class TransportDeleteAction extends TransportReplicationAction<DeleteRequ

    @Override
    protected void resolveRequest(final MetaData metaData, String concreteIndex, DeleteRequest request) {
        resolveAndValidateRouting(metaData, concreteIndex, request);
        ShardId shardId = clusterService.operationRouting().shardId(clusterService.state(), concreteIndex, request.id(), request.routing());
        request.setShardId(shardId);
    }

    public static void resolveAndValidateRouting(final MetaData metaData, String concreteIndex, DeleteRequest request) {
        request.routing(metaData.resolveIndexRouting(request.parent(), request.routing(), request.index()));
        if (metaData.hasIndex(concreteIndex)) {
            // check if routing is required, if so, do a broadcast delete
            // check if routing is required, if so, throw error if routing wasn't specified
            MappingMetaData mappingMd = metaData.index(concreteIndex).mappingOrDefault(request.type());
            if (mappingMd != null && mappingMd.routing().required()) {
                if (request.routing() == null) {
                    if (request.versionType() != VersionType.INTERNAL) {
                        // TODO: implement this feature
                        throw new IllegalArgumentException("routing value is required for deleting documents of type [" + request.type()
                            + "] while using version_type [" + request.versionType() + "]");
                                + "] while using version_type [" + request.versionType() + "]");
                    }
                    throw new RoutingMissingException(concreteIndex, request.type(), request.id());
                }
            }
        }
        ShardId shardId = clusterService.operationRouting().shardId(clusterService.state(), concreteIndex, request.id(), request.routing());
        request.setShardId(shardId);
    }

    private void innerExecute(Task task, final DeleteRequest request, final ActionListener<DeleteResponse> listener) {
@@ -223,6 +223,13 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
        return validationException;
    }

    /**
     * The content type that will be used when generating a document from user provided objects like Maps.
     */
    public XContentType getContentType() {
        return contentType;
    }

    /**
     * Sets the content type that will be used when generating a document from user provided objects (like Map).
     */

@@ -294,6 +301,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
        return this;
    }

    @Override
    public String parent() {
        return this.parent;
    }

@@ -645,7 +653,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        type = in.readString();
        type = in.readOptionalString();
        id = in.readOptionalString();
        routing = in.readOptionalString();
        parent = in.readOptionalString();

@@ -663,7 +671,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeString(type);
        out.writeOptionalString(type);
        out.writeOptionalString(id);
        out.writeOptionalString(routing);
        out.writeOptionalString(parent);
@@ -43,6 +43,7 @@ import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.concurrent.TimeUnit;

public final class IngestActionFilter extends AbstractComponent implements ActionFilter {

@@ -101,6 +102,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
    }

    void processBulkIndexRequest(Task task, BulkRequest original, String action, ActionFilterChain chain, ActionListener<BulkResponse> listener) {
        long ingestStartTimeInNanos = System.nanoTime();
        BulkRequestModifier bulkRequestModifier = new BulkRequestModifier(original);
        executionService.executeBulkRequest(() -> bulkRequestModifier, (indexRequest, throwable) -> {
            logger.debug("failed to execute pipeline [{}] for document [{}/{}/{}]", throwable, indexRequest.getPipeline(), indexRequest.index(), indexRequest.type(), indexRequest.id());

@@ -110,8 +112,9 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
                logger.error("failed to execute pipeline for a bulk request", throwable);
                listener.onFailure(throwable);
            } else {
                long ingestTookInMillis = TimeUnit.MILLISECONDS.convert(System.nanoTime() - ingestStartTimeInNanos, TimeUnit.NANOSECONDS);
                BulkRequest bulkRequest = bulkRequestModifier.getBulkRequest();
                ActionListener<BulkResponse> actionListener = bulkRequestModifier.wrapActionListenerIfNeeded(listener);
                ActionListener<BulkResponse> actionListener = bulkRequestModifier.wrapActionListenerIfNeeded(ingestTookInMillis, listener);
                if (bulkRequest.requests().isEmpty()) {
                    // at this stage, the transport bulk action can't deal with a bulk request with no requests,
                    // so we stop and send an empty response back to the client.

@@ -176,11 +179,21 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
            }
        }

        ActionListener<BulkResponse> wrapActionListenerIfNeeded(ActionListener<BulkResponse> actionListener) {
        ActionListener<BulkResponse> wrapActionListenerIfNeeded(long ingestTookInMillis, ActionListener<BulkResponse> actionListener) {
            if (itemResponses.isEmpty()) {
                return actionListener;
                return new ActionListener<BulkResponse>() {
                    @Override
                    public void onResponse(BulkResponse response) {
                        actionListener.onResponse(new BulkResponse(response.getItems(), response.getTookInMillis(), ingestTookInMillis));
                    }

                    @Override
                    public void onFailure(Throwable e) {
                        actionListener.onFailure(e);
                    }
                };
            } else {
                return new IngestBulkResponseListener(originalSlots, itemResponses, actionListener);
                return new IngestBulkResponseListener(ingestTookInMillis, originalSlots, itemResponses, actionListener);
            }
        }

@@ -197,24 +210,26 @@ public final class IngestActionFilter extends AbstractComponent implements Actio

    }

    private final static class IngestBulkResponseListener implements ActionListener<BulkResponse> {
    final static class IngestBulkResponseListener implements ActionListener<BulkResponse> {

        private final long ingestTookInMillis;
        private final int[] originalSlots;
        private final List<BulkItemResponse> itemResponses;
        private final ActionListener<BulkResponse> actionListener;

        IngestBulkResponseListener(int[] originalSlots, List<BulkItemResponse> itemResponses, ActionListener<BulkResponse> actionListener) {
        IngestBulkResponseListener(long ingestTookInMillis, int[] originalSlots, List<BulkItemResponse> itemResponses, ActionListener<BulkResponse> actionListener) {
            this.ingestTookInMillis = ingestTookInMillis;
            this.itemResponses = itemResponses;
            this.actionListener = actionListener;
            this.originalSlots = originalSlots;
        }

        @Override
        public void onResponse(BulkResponse bulkItemResponses) {
            for (int i = 0; i < bulkItemResponses.getItems().length; i++) {
                itemResponses.add(originalSlots[i], bulkItemResponses.getItems()[i]);
        public void onResponse(BulkResponse response) {
            for (int i = 0; i < response.getItems().length; i++) {
                itemResponses.add(originalSlots[i], response.getItems()[i]);
            }
            actionListener.onResponse(new BulkResponse(itemResponses.toArray(new BulkItemResponse[itemResponses.size()]), bulkItemResponses.getTookInMillis()));
            actionListener.onResponse(new BulkResponse(itemResponses.toArray(new BulkItemResponse[itemResponses.size()]), response.getTookInMillis(), ingestTookInMillis));
        }

        @Override
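Both branches of wrapActionListenerIfNeeded now decorate the downstream listener so the final BulkResponse carries the measured ingest time. The decoration itself is a small, reusable shape (a sketch; withIngestTook is a hypothetical name):

    // Sketch of the listener-decoration pattern used above.
    static ActionListener<BulkResponse> withIngestTook(long ingestTookInMillis,
                                                       ActionListener<BulkResponse> delegate) {
        return new ActionListener<BulkResponse>() {
            @Override
            public void onResponse(BulkResponse response) {
                // Rebuild the response with the ingest timing attached.
                delegate.onResponse(new BulkResponse(
                        response.getItems(), response.getTookInMillis(), ingestTookInMillis));
            }

            @Override
            public void onFailure(Throwable e) {
                delegate.onFailure(e); // failures pass through unchanged
            }
        };
    }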
@@ -20,6 +20,10 @@

package org.elasticsearch.action.ingest;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.action.admin.cluster.node.info.TransportNodesInfoAction;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.cluster.ClusterService;

@@ -27,24 +31,32 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.ingest.PipelineStore;
import org.elasticsearch.ingest.core.IngestInfo;
import org.elasticsearch.node.service.NodeService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.HashMap;
import java.util.Map;

public class PutPipelineTransportAction extends TransportMasterNodeAction<PutPipelineRequest, WritePipelineResponse> {

    private final PipelineStore pipelineStore;
    private final ClusterService clusterService;
    private final TransportNodesInfoAction nodesInfoAction;

    @Inject
    public PutPipelineTransportAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
                                      TransportService transportService, ActionFilters actionFilters,
                                      IndexNameExpressionResolver indexNameExpressionResolver, NodeService nodeService) {
                                      IndexNameExpressionResolver indexNameExpressionResolver, NodeService nodeService,
                                      TransportNodesInfoAction nodesInfoAction) {
        super(settings, PutPipelineAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, PutPipelineRequest::new);
        this.clusterService = clusterService;
        this.nodesInfoAction = nodesInfoAction;
        this.pipelineStore = nodeService.getIngestService().getPipelineStore();
    }

@@ -60,7 +72,28 @@ public class PutPipelineTransportAction extends TransportMasterNodeAction<PutPip

    @Override
    protected void masterOperation(PutPipelineRequest request, ClusterState state, ActionListener<WritePipelineResponse> listener) throws Exception {
        pipelineStore.put(clusterService, request, listener);
        NodesInfoRequest nodesInfoRequest = new NodesInfoRequest();
        nodesInfoRequest.clear();
        nodesInfoRequest.ingest(true);
        nodesInfoAction.execute(nodesInfoRequest, new ActionListener<NodesInfoResponse>() {
            @Override
            public void onResponse(NodesInfoResponse nodeInfos) {
                try {
                    Map<DiscoveryNode, IngestInfo> ingestInfos = new HashMap<>();
                    for (NodeInfo nodeInfo : nodeInfos) {
                        ingestInfos.put(nodeInfo.getNode(), nodeInfo.getIngest());
                    }
                    pipelineStore.put(clusterService, ingestInfos, request, listener);
                } catch (Exception e) {
                    onFailure(e);
                }
            }

            @Override
            public void onFailure(Throwable e) {
                listener.onFailure(e);
            }
        });
    }

    @Override
|
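The new masterOperation no longer stores the pipeline directly: it first gathers every node's IngestInfo through a nodes-info call stripped down to ingest data, and only then hands the DiscoveryNode-to-IngestInfo map to PipelineStore.put, so the master can refuse a pipeline that references a processor some node does not have. A simplified stand-in for that check; the exact rule is an assumption about what the store does with the map, and the names here are illustrative:

    import java.util.Map;
    import java.util.Set;

    class PipelineValidationSketch {
        // Refuse the pipeline if any node lacks a processor type it declares.
        static void validate(Set<String> processorsInPipeline, Map<String, Set<String>> processorsByNode) {
            for (Map.Entry<String, Set<String>> node : processorsByNode.entrySet()) {
                for (String processor : processorsInPipeline) {
                    if (!node.getValue().contains(processor)) {
                        throw new IllegalArgumentException("processor [" + processor
                            + "] is not installed on node [" + node.getKey() + "]");
                    }
                }
            }
        }
    }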
@@ -17,12 +17,12 @@
 * under the License.
 */

package org.elasticsearch.action.search.type;
package org.elasticsearch.action.search;

/**
 * Base implementation for an async action.
 */
public class AbstractAsyncAction {
abstract class AbstractAsyncAction {

    private final long startTime;

@@ -46,4 +46,5 @@ public class AbstractAsyncAction {
        return Math.max(1, System.currentTimeMillis() - startTime);
    }

    abstract void start();
}
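Only the tail of AbstractAsyncAction is visible in this hunk, but the visible pieces pin down its shape: a start timestamp captured up front, a took-time helper that never reports less than one millisecond, and the abstract start() the change adds so subclasses are forced to implement it. A minimal sketch along those lines; the constructor-time initialization is an assumption, since only startTime, the Math.max line, and start() appear in the diff:

    abstract class AsyncActionSketch {
        private final long startTime = System.currentTimeMillis();

        protected final long startTime() {
            return startTime;
        }

        protected final long buildTookInMillis() {
            // never report 0 ms, even if the clock did not advance
            return Math.max(1, System.currentTimeMillis() - startTime);
        }

        abstract void start();
    }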
@@ -0,0 +1,393 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.search;

import com.carrotsearch.hppc.IntArrayList;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.NoShardAvailableActionException;
import org.elasticsearch.action.support.TransportActions;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.query.QuerySearchResultProvider;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;

import static org.elasticsearch.action.search.TransportSearchHelper.internalSearchRequest;

abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult> extends AbstractAsyncAction {

    protected final ESLogger logger;
    protected final SearchTransportService searchTransportService;
    private final IndexNameExpressionResolver indexNameExpressionResolver;
    protected final SearchPhaseController searchPhaseController;
    protected final ThreadPool threadPool;
    protected final ActionListener<SearchResponse> listener;
    protected final GroupShardsIterator shardsIts;
    protected final SearchRequest request;
    protected final ClusterState clusterState;
    protected final DiscoveryNodes nodes;
    protected final int expectedSuccessfulOps;
    private final int expectedTotalOps;
    protected final AtomicInteger successfulOps = new AtomicInteger();
    private final AtomicInteger totalOps = new AtomicInteger();
    protected final AtomicArray<FirstResult> firstResults;
    private volatile AtomicArray<ShardSearchFailure> shardFailures;
    private final Object shardFailuresMutex = new Object();
    protected volatile ScoreDoc[] sortedShardList;

    protected AbstractSearchAsyncAction(ESLogger logger, SearchTransportService searchTransportService, ClusterService clusterService,
                                        IndexNameExpressionResolver indexNameExpressionResolver,
                                        SearchPhaseController searchPhaseController, ThreadPool threadPool, SearchRequest request,
                                        ActionListener<SearchResponse> listener) {
        this.logger = logger;
        this.searchTransportService = searchTransportService;
        this.indexNameExpressionResolver = indexNameExpressionResolver;
        this.searchPhaseController = searchPhaseController;
        this.threadPool = threadPool;
        this.request = request;
        this.listener = listener;

        this.clusterState = clusterService.state();
        nodes = clusterState.nodes();

        clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ);

        // TODO: I think startTime() should become part of ActionRequest and that should be used both for index name
        // date math expressions and $now in scripts. This way all apis will deal with now in the same way instead
        // of just for the _search api
        String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request.indicesOptions(),
            startTime(), request.indices());

        for (String index : concreteIndices) {
            clusterState.blocks().indexBlockedRaiseException(ClusterBlockLevel.READ, index);
        }

        Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, request.routing(),
            request.indices());

        shardsIts = clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, request.preference());
        expectedSuccessfulOps = shardsIts.size();
        // we need to add 1 for the non active partition, since we count it in the total!
        expectedTotalOps = shardsIts.totalSizeWith1ForEmpty();

        firstResults = new AtomicArray<>(shardsIts.size());
    }

    public void start() {
        if (expectedSuccessfulOps == 0) {
            // no search shards to search on, bail with empty response
            // (it happens with search across _all with no indices around and consistent with broadcast operations)
            listener.onResponse(new SearchResponse(InternalSearchResponse.empty(), null, 0, 0, buildTookInMillis(),
                ShardSearchFailure.EMPTY_ARRAY));
            return;
        }
        int shardIndex = -1;
        for (final ShardIterator shardIt : shardsIts) {
            shardIndex++;
            final ShardRouting shard = shardIt.nextOrNull();
            if (shard != null) {
                performFirstPhase(shardIndex, shardIt, shard);
            } else {
                // really, no shards active in this group
                onFirstPhaseResult(shardIndex, null, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
            }
        }
    }

    void performFirstPhase(final int shardIndex, final ShardIterator shardIt, final ShardRouting shard) {
        if (shard == null) {
            // no more active shards... (we should not really get here, but just for safety)
            onFirstPhaseResult(shardIndex, null, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
        } else {
            final DiscoveryNode node = nodes.get(shard.currentNodeId());
            if (node == null) {
                onFirstPhaseResult(shardIndex, shard, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
            } else {
                String[] filteringAliases = indexNameExpressionResolver.filteringAliases(clusterState,
                    shard.index().getName(), request.indices());
                sendExecuteFirstPhase(node, internalSearchRequest(shard, shardsIts.size(), request, filteringAliases,
                    startTime()), new ActionListener<FirstResult>() {
                    @Override
                    public void onResponse(FirstResult result) {
                        onFirstPhaseResult(shardIndex, shard, result, shardIt);
                    }

                    @Override
                    public void onFailure(Throwable t) {
                        onFirstPhaseResult(shardIndex, shard, node.id(), shardIt, t);
                    }
                });
            }
        }
    }

    void onFirstPhaseResult(int shardIndex, ShardRouting shard, FirstResult result, ShardIterator shardIt) {
        result.shardTarget(new SearchShardTarget(shard.currentNodeId(), shard.index(), shard.id()));
        processFirstPhaseResult(shardIndex, result);
        // we need to increment successful ops first before we compare the exit condition otherwise if we
        // are fast we could concurrently update totalOps but then preempt one of the threads which can
        // cause the successor to read a wrong value from successfulOps if the second phase is very fast, i.e. count etc.
        successfulOps.incrementAndGet();
        // increment all the "future" shards to update the total ops since some may work and some may not...
        // and when that happens, we break on total ops, so we must maintain them
        final int xTotalOps = totalOps.addAndGet(shardIt.remaining() + 1);
        if (xTotalOps == expectedTotalOps) {
            try {
                innerMoveToSecondPhase();
            } catch (Throwable e) {
                if (logger.isDebugEnabled()) {
                    logger.debug(shardIt.shardId() + ": Failed to execute [" + request + "] while moving to second phase", e);
                }
                raiseEarlyFailure(new ReduceSearchPhaseException(firstPhaseName(), "", e, buildShardFailures()));
            }
        } else if (xTotalOps > expectedTotalOps) {
            raiseEarlyFailure(new IllegalStateException("unexpected higher total ops [" + xTotalOps + "] compared " +
                "to expected [" + expectedTotalOps + "]"));
        }
    }

    void onFirstPhaseResult(final int shardIndex, @Nullable ShardRouting shard, @Nullable String nodeId,
                            final ShardIterator shardIt, Throwable t) {
        // we always add the shard failure for a specific shard instance
        // we do make sure to clean it on a successful response from a shard
        SearchShardTarget shardTarget = new SearchShardTarget(nodeId, shardIt.shardId().getIndex(), shardIt.shardId().getId());
        addShardFailure(shardIndex, shardTarget, t);

        if (totalOps.incrementAndGet() == expectedTotalOps) {
            if (logger.isDebugEnabled()) {
                if (t != null && !TransportActions.isShardNotAvailableException(t)) {
                    if (shard != null) {
                        logger.debug(shard.shortSummary() + ": Failed to execute [" + request + "]", t);
                    } else {
                        logger.debug(shardIt.shardId() + ": Failed to execute [" + request + "]", t);
                    }
                } else if (logger.isTraceEnabled()) {
                    logger.trace("{}: Failed to execute [{}]", t, shard, request);
                }
            }
            final ShardSearchFailure[] shardSearchFailures = buildShardFailures();
            if (successfulOps.get() == 0) {
                if (logger.isDebugEnabled()) {
                    logger.debug("All shards failed for phase: [{}]", t, firstPhaseName());
                }

                // no successful ops, raise an exception
                raiseEarlyFailure(new SearchPhaseExecutionException(firstPhaseName(), "all shards failed", t, shardSearchFailures));
            } else {
                try {
                    innerMoveToSecondPhase();
                } catch (Throwable e) {
                    raiseEarlyFailure(new ReduceSearchPhaseException(firstPhaseName(), "", e, shardSearchFailures));
                }
            }
        } else {
            final ShardRouting nextShard = shardIt.nextOrNull();
            final boolean lastShard = nextShard == null;
            // trace log this exception
            if (logger.isTraceEnabled()) {
                logger.trace(executionFailureMsg(shard, shardIt, request, lastShard), t);
            }
            if (!lastShard) {
                try {
                    performFirstPhase(shardIndex, shardIt, nextShard);
                } catch (Throwable t1) {
                    onFirstPhaseResult(shardIndex, shard, shard.currentNodeId(), shardIt, t1);
                }
            } else {
                // no more shards active, add a failure
                if (logger.isDebugEnabled() && !logger.isTraceEnabled()) { // do not double log this exception
                    if (t != null && !TransportActions.isShardNotAvailableException(t)) {
                        logger.debug(executionFailureMsg(shard, shardIt, request, lastShard), t);
                    }
                }
            }
        }
    }

    private String executionFailureMsg(@Nullable ShardRouting shard, final ShardIterator shardIt, SearchRequest request,
                                       boolean lastShard) {
        if (shard != null) {
            return shard.shortSummary() + ": Failed to execute [" + request + "] lastShard [" + lastShard + "]";
        } else {
            return shardIt.shardId() + ": Failed to execute [" + request + "] lastShard [" + lastShard + "]";
        }
    }

    protected final ShardSearchFailure[] buildShardFailures() {
        AtomicArray<ShardSearchFailure> shardFailures = this.shardFailures;
        if (shardFailures == null) {
            return ShardSearchFailure.EMPTY_ARRAY;
        }
        List<AtomicArray.Entry<ShardSearchFailure>> entries = shardFailures.asList();
        ShardSearchFailure[] failures = new ShardSearchFailure[entries.size()];
        for (int i = 0; i < failures.length; i++) {
            failures[i] = entries.get(i).value;
        }
        return failures;
    }

    protected final void addShardFailure(final int shardIndex, @Nullable SearchShardTarget shardTarget, Throwable t) {
        // we don't aggregate shard failures on non active shards (but do keep the header counts right)
        if (TransportActions.isShardNotAvailableException(t)) {
            return;
        }

        // lazily create shard failures, so we can early build the empty shard failure list in most cases (no failures)
        if (shardFailures == null) {
            synchronized (shardFailuresMutex) {
                if (shardFailures == null) {
                    shardFailures = new AtomicArray<>(shardsIts.size());
                }
            }
        }
        ShardSearchFailure failure = shardFailures.get(shardIndex);
        if (failure == null) {
            shardFailures.set(shardIndex, new ShardSearchFailure(t, shardTarget));
        } else {
            // the failure is already present, try and not override it with an exception that is less meaningful
            // for example, getting illegal shard state
            if (TransportActions.isReadOverrideException(t)) {
                shardFailures.set(shardIndex, new ShardSearchFailure(t, shardTarget));
            }
        }
    }

    private void raiseEarlyFailure(Throwable t) {
        for (AtomicArray.Entry<FirstResult> entry : firstResults.asList()) {
            try {
                DiscoveryNode node = nodes.get(entry.value.shardTarget().nodeId());
                sendReleaseSearchContext(entry.value.id(), node);
            } catch (Throwable t1) {
                logger.trace("failed to release context", t1);
            }
        }
        listener.onFailure(t);
    }

    /**
     * Releases shard targets that are not used in the docIdsToLoad.
     */
    protected void releaseIrrelevantSearchContexts(AtomicArray<? extends QuerySearchResultProvider> queryResults,
                                                   AtomicArray<IntArrayList> docIdsToLoad) {
        if (docIdsToLoad == null) {
            return;
        }
        // we only release search contexts that we did not fetch from if we are not scrolling
        if (request.scroll() == null) {
            for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults.asList()) {
                final TopDocs topDocs = entry.value.queryResult().queryResult().topDocs();
                if (topDocs != null && topDocs.scoreDocs.length > 0 // the shard had matches
                        && docIdsToLoad.get(entry.index) == null) { // but none of them made it to the global top docs
                    try {
                        DiscoveryNode node = nodes.get(entry.value.queryResult().shardTarget().nodeId());
                        sendReleaseSearchContext(entry.value.queryResult().id(), node);
                    } catch (Throwable t1) {
                        logger.trace("failed to release context", t1);
                    }
                }
            }
        }
    }

    protected void sendReleaseSearchContext(long contextId, DiscoveryNode node) {
        if (node != null) {
            searchTransportService.sendFreeContext(node, contextId, request);
        }
    }

    protected ShardFetchSearchRequest createFetchRequest(QuerySearchResult queryResult, AtomicArray.Entry<IntArrayList> entry,
                                                         ScoreDoc[] lastEmittedDocPerShard) {
        if (lastEmittedDocPerShard != null) {
            ScoreDoc lastEmittedDoc = lastEmittedDocPerShard[entry.index];
            return new ShardFetchSearchRequest(request, queryResult.id(), entry.value, lastEmittedDoc);
        } else {
            return new ShardFetchSearchRequest(request, queryResult.id(), entry.value);
        }
    }

    protected abstract void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request,
                                                  ActionListener<FirstResult> listener);

    protected final void processFirstPhaseResult(int shardIndex, FirstResult result) {
        firstResults.set(shardIndex, result);

        if (logger.isTraceEnabled()) {
            logger.trace("got first-phase result from {}", result != null ? result.shardTarget() : null);
        }

        // clean a previous error on this shard group (note, this code will be serialized on the same shardIndex value level
        // so it's ok concurrency-wise to miss potentially the shard failures being created because of another failure
        // in the #addShardFailure, because by definition, it will happen on *another* shardIndex
        AtomicArray<ShardSearchFailure> shardFailures = this.shardFailures;
        if (shardFailures != null) {
            shardFailures.set(shardIndex, null);
        }
    }

    final void innerMoveToSecondPhase() throws Exception {
        if (logger.isTraceEnabled()) {
            StringBuilder sb = new StringBuilder();
            boolean hadOne = false;
            for (int i = 0; i < firstResults.length(); i++) {
                FirstResult result = firstResults.get(i);
                if (result == null) {
                    continue; // failure
                }
                if (hadOne) {
                    sb.append(",");
                } else {
                    hadOne = true;
                }
                sb.append(result.shardTarget());
            }

            logger.trace("Moving to second phase, based on results from: {} (cluster state version: {})", sb, clusterState.version());
        }
        moveToSecondPhase();
    }

    protected abstract void moveToSecondPhase() throws Exception;

    protected abstract String firstPhaseName();
}
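The trickiest part of the class above is the totalOps bookkeeping explained in the comments of onFirstPhaseResult: a shard group that succeeds credits the counter with remaining() + 1 (its untried copies count as done), while each failed copy adds exactly one, so the counter reaches expectedTotalOps at the same moment regardless of how the copies fared. A worked example with illustrative numbers:

    import java.util.concurrent.atomic.AtomicInteger;

    class TotalOpsDemo {
        public static void main(String[] args) {
            // two shard groups, each with 3 copies: totalSizeWith1ForEmpty() == 6
            int expectedTotalOps = 6;
            AtomicInteger totalOps = new AtomicInteger();
            // group 0 succeeds on its first copy: its 2 untried copies are credited too
            totalOps.addAndGet(2 + 1); // counter is now 3
            // group 1 fails on all 3 copies, one increment per attempt
            for (int attempt = 0; attempt < 3; attempt++) {
                if (totalOps.incrementAndGet() == expectedTotalOps) {
                    System.out.println("all ops accounted for, move to second phase");
                }
            }
        }
    }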
@@ -17,14 +17,14 @@
 * under the License.
 */

package org.elasticsearch.action.search.type;
package org.elasticsearch.action.search;

import java.util.Map;

/**
 *
 */
public class ParsedScrollId {
class ParsedScrollId {

    public static final String QUERY_THEN_FETCH_TYPE = "queryThenFetch";
@@ -17,9 +17,9 @@
 * under the License.
 */

package org.elasticsearch.action.search.type;
package org.elasticsearch.action.search;

public class ScrollIdForNode {
class ScrollIdForNode {
    private final String node;
    private final long scrollId;
@@ -0,0 +1,143 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.search;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.dfs.AggregatedDfs;
import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.search.query.QuerySearchRequest;
import org.elasticsearch.threadpool.ThreadPool;

import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;

class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<DfsSearchResult> {

    private final AtomicArray<QueryFetchSearchResult> queryFetchResults;

    SearchDfsQueryAndFetchAsyncAction(ESLogger logger, SearchTransportService searchTransportService,
                                      ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
                                      SearchPhaseController searchPhaseController, ThreadPool threadPool,
                                      SearchRequest request, ActionListener<SearchResponse> listener) {
        super(logger, searchTransportService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool,
            request, listener);
        queryFetchResults = new AtomicArray<>(firstResults.length());
    }

    @Override
    protected String firstPhaseName() {
        return "dfs";
    }

    @Override
    protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request,
                                         ActionListener<DfsSearchResult> listener) {
        searchTransportService.sendExecuteDfs(node, request, listener);
    }

    @Override
    protected void moveToSecondPhase() {
        final AggregatedDfs dfs = searchPhaseController.aggregateDfs(firstResults);
        final AtomicInteger counter = new AtomicInteger(firstResults.asList().size());

        for (final AtomicArray.Entry<DfsSearchResult> entry : firstResults.asList()) {
            DfsSearchResult dfsResult = entry.value;
            DiscoveryNode node = nodes.get(dfsResult.shardTarget().nodeId());
            QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs);
            executeSecondPhase(entry.index, dfsResult, counter, node, querySearchRequest);
        }
    }

    void executeSecondPhase(final int shardIndex, final DfsSearchResult dfsResult, final AtomicInteger counter,
                            final DiscoveryNode node, final QuerySearchRequest querySearchRequest) {
        searchTransportService.sendExecuteFetch(node, querySearchRequest, new ActionListener<QueryFetchSearchResult>() {
            @Override
            public void onResponse(QueryFetchSearchResult result) {
                result.shardTarget(dfsResult.shardTarget());
                queryFetchResults.set(shardIndex, result);
                if (counter.decrementAndGet() == 0) {
                    finishHim();
                }
            }

            @Override
            public void onFailure(Throwable t) {
                try {
                    onSecondPhaseFailure(t, querySearchRequest, shardIndex, dfsResult, counter);
                } finally {
                    // the query might not have been executed at all (for example because thread pool rejected execution)
                    // and the search context that was created in the dfs phase might not be released.
                    // release it again to be on the safe side
                    sendReleaseSearchContext(querySearchRequest.id(), node);
                }
            }
        });
    }

    void onSecondPhaseFailure(Throwable t, QuerySearchRequest querySearchRequest, int shardIndex, DfsSearchResult dfsResult,
                              AtomicInteger counter) {
        if (logger.isDebugEnabled()) {
            logger.debug("[{}] Failed to execute query phase", t, querySearchRequest.id());
        }
        this.addShardFailure(shardIndex, dfsResult.shardTarget(), t);
        successfulOps.decrementAndGet();
        if (counter.decrementAndGet() == 0) {
            finishHim();
        }
    }

    private void finishHim() {
        threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable<SearchResponse>(listener) {
            @Override
            public void doRun() throws IOException {
                sortedShardList = searchPhaseController.sortDocs(true, queryFetchResults);
                final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults,
                    queryFetchResults);
                String scrollId = null;
                if (request.scroll() != null) {
                    scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
                }
                listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(),
                    buildTookInMillis(), buildShardFailures()));
            }

            @Override
            public void onFailure(Throwable t) {
                ReduceSearchPhaseException failure = new ReduceSearchPhaseException("query_fetch", "", t, buildShardFailures());
                if (logger.isDebugEnabled()) {
                    logger.debug("failed to reduce search", failure);
                }
                super.onFailure(t);
            }
        });
    }
}
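Every per-shard callback in these actions, success and failure alike, ends with the same two lines: decrement a shared counter and let whichever callback reaches zero run finishHim(). That guarantees the reduce step runs exactly once, after the last outstanding shard-level call has reported in. The idiom in isolation (a sketch, not Elasticsearch code):

    import java.util.concurrent.atomic.AtomicInteger;

    class CountdownDemo {
        public static void main(String[] args) {
            int shards = 4;
            AtomicInteger counter = new AtomicInteger(shards);
            Runnable finishHim = () -> System.out.println("merging results");
            for (int i = 0; i < shards; i++) {
                new Thread(() -> {
                    // ... per-shard work: store a result or record a failure ...
                    if (counter.decrementAndGet() == 0) {
                        finishHim.run(); // runs on exactly one callback thread
                    }
                }).start();
            }
        }
    }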
@@ -0,0 +1,224 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.search;

import com.carrotsearch.hppc.IntArrayList;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.dfs.AggregatedDfs;
import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.search.fetch.FetchSearchResult;
import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.search.query.QuerySearchRequest;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.threadpool.ThreadPool;

import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;

class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSearchResult> {

    final AtomicArray<QuerySearchResult> queryResults;
    final AtomicArray<FetchSearchResult> fetchResults;
    final AtomicArray<IntArrayList> docIdsToLoad;

    SearchDfsQueryThenFetchAsyncAction(ESLogger logger, SearchTransportService searchTransportService,
                                       ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
                                       SearchPhaseController searchPhaseController, ThreadPool threadPool,
                                       SearchRequest request, ActionListener<SearchResponse> listener) {
        super(logger, searchTransportService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool,
            request, listener);
        queryResults = new AtomicArray<>(firstResults.length());
        fetchResults = new AtomicArray<>(firstResults.length());
        docIdsToLoad = new AtomicArray<>(firstResults.length());
    }

    @Override
    protected String firstPhaseName() {
        return "dfs";
    }

    @Override
    protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request,
                                         ActionListener<DfsSearchResult> listener) {
        searchTransportService.sendExecuteDfs(node, request, listener);
    }

    @Override
    protected void moveToSecondPhase() {
        final AggregatedDfs dfs = searchPhaseController.aggregateDfs(firstResults);
        final AtomicInteger counter = new AtomicInteger(firstResults.asList().size());
        for (final AtomicArray.Entry<DfsSearchResult> entry : firstResults.asList()) {
            DfsSearchResult dfsResult = entry.value;
            DiscoveryNode node = nodes.get(dfsResult.shardTarget().nodeId());
            QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs);
            executeQuery(entry.index, dfsResult, counter, querySearchRequest, node);
        }
    }

    void executeQuery(final int shardIndex, final DfsSearchResult dfsResult, final AtomicInteger counter,
                      final QuerySearchRequest querySearchRequest, final DiscoveryNode node) {
        searchTransportService.sendExecuteQuery(node, querySearchRequest, new ActionListener<QuerySearchResult>() {
            @Override
            public void onResponse(QuerySearchResult result) {
                result.shardTarget(dfsResult.shardTarget());
                queryResults.set(shardIndex, result);
                if (counter.decrementAndGet() == 0) {
                    executeFetchPhase();
                }
            }

            @Override
            public void onFailure(Throwable t) {
                try {
                    onQueryFailure(t, querySearchRequest, shardIndex, dfsResult, counter);
                } finally {
                    // the query might not have been executed at all (for example because thread pool rejected
                    // execution) and the search context that was created in the dfs phase might not be released.
                    // release it again to be on the safe side
                    sendReleaseSearchContext(querySearchRequest.id(), node);
                }
            }
        });
    }

    void onQueryFailure(Throwable t, QuerySearchRequest querySearchRequest, int shardIndex, DfsSearchResult dfsResult,
                        AtomicInteger counter) {
        if (logger.isDebugEnabled()) {
            logger.debug("[{}] Failed to execute query phase", t, querySearchRequest.id());
        }
        this.addShardFailure(shardIndex, dfsResult.shardTarget(), t);
        successfulOps.decrementAndGet();
        if (counter.decrementAndGet() == 0) {
            if (successfulOps.get() == 0) {
                listener.onFailure(new SearchPhaseExecutionException("query", "all shards failed", buildShardFailures()));
            } else {
                executeFetchPhase();
            }
        }
    }

    void executeFetchPhase() {
        try {
            innerExecuteFetchPhase();
        } catch (Throwable e) {
            listener.onFailure(new ReduceSearchPhaseException("query", "", e, buildShardFailures()));
        }
    }

    void innerExecuteFetchPhase() throws Exception {
        boolean useScroll = request.scroll() != null;
        sortedShardList = searchPhaseController.sortDocs(useScroll, queryResults);
        searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardList);

        if (docIdsToLoad.asList().isEmpty()) {
            finishHim();
            return;
        }

        final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(
            request, sortedShardList, firstResults.length()
        );
        final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size());
        for (final AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
            QuerySearchResult queryResult = queryResults.get(entry.index);
            DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId());
            ShardFetchSearchRequest fetchSearchRequest = createFetchRequest(queryResult, entry, lastEmittedDocPerShard);
            executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node);
        }
    }

    void executeFetch(final int shardIndex, final SearchShardTarget shardTarget, final AtomicInteger counter,
                      final ShardFetchSearchRequest fetchSearchRequest, DiscoveryNode node) {
        searchTransportService.sendExecuteFetch(node, fetchSearchRequest, new ActionListener<FetchSearchResult>() {
            @Override
            public void onResponse(FetchSearchResult result) {
                result.shardTarget(shardTarget);
                fetchResults.set(shardIndex, result);
                if (counter.decrementAndGet() == 0) {
                    finishHim();
                }
            }

            @Override
            public void onFailure(Throwable t) {
                // the search context might not be cleared on the node where the fetch was executed, for example
                // because the action was rejected by the thread pool. in this case we need to send a dedicated
                // request to clear the search context. by setting docIdsToLoad to null, the context will be cleared
                // in TransportSearchTypeAction.releaseIrrelevantSearchContexts() after the search request is done.
                docIdsToLoad.set(shardIndex, null);
                onFetchFailure(t, fetchSearchRequest, shardIndex, shardTarget, counter);
            }
        });
    }

    void onFetchFailure(Throwable t, ShardFetchSearchRequest fetchSearchRequest, int shardIndex,
                        SearchShardTarget shardTarget, AtomicInteger counter) {
        if (logger.isDebugEnabled()) {
            logger.debug("[{}] Failed to execute fetch phase", t, fetchSearchRequest.id());
        }
        this.addShardFailure(shardIndex, shardTarget, t);
        successfulOps.decrementAndGet();
        if (counter.decrementAndGet() == 0) {
            finishHim();
        }
    }

    private void finishHim() {
        threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable<SearchResponse>(listener) {
            @Override
            public void doRun() throws IOException {
                final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryResults,
                    fetchResults);
                String scrollId = null;
                if (request.scroll() != null) {
                    scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
                }
                listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(),
                    buildTookInMillis(), buildShardFailures()));
                releaseIrrelevantSearchContexts(queryResults, docIdsToLoad);
            }

            @Override
            public void onFailure(Throwable t) {
                try {
                    ReduceSearchPhaseException failure = new ReduceSearchPhaseException("merge", "", t, buildShardFailures());
                    if (logger.isDebugEnabled()) {
                        logger.debug("failed to reduce search", failure);
                    }
                    super.onFailure(failure);
                } finally {
                    releaseIrrelevantSearchContexts(queryResults, docIdsToLoad);
                }
            }
        });
    }
}
@@ -0,0 +1,85 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.search;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.threadpool.ThreadPool;

import java.io.IOException;

class SearchQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<QueryFetchSearchResult> {

    SearchQueryAndFetchAsyncAction(ESLogger logger, SearchTransportService searchTransportService,
                                   ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
                                   SearchPhaseController searchPhaseController, ThreadPool threadPool,
                                   SearchRequest request, ActionListener<SearchResponse> listener) {
        super(logger, searchTransportService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool,
            request, listener);
    }

    @Override
    protected String firstPhaseName() {
        return "query_fetch";
    }

    @Override
    protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request,
                                         ActionListener<QueryFetchSearchResult> listener) {
        searchTransportService.sendExecuteFetch(node, request, listener);
    }

    @Override
    protected void moveToSecondPhase() throws Exception {
        threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable<SearchResponse>(listener) {
            @Override
            public void doRun() throws IOException {
                boolean useScroll = request.scroll() != null;
                sortedShardList = searchPhaseController.sortDocs(useScroll, firstResults);
                final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, firstResults,
                    firstResults);
                String scrollId = null;
                if (request.scroll() != null) {
                    scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
                }
                listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(),
                    buildTookInMillis(), buildShardFailures()));
            }

            @Override
            public void onFailure(Throwable t) {
                ReduceSearchPhaseException failure = new ReduceSearchPhaseException("merge", "", t, buildShardFailures());
                if (logger.isDebugEnabled()) {
                    logger.debug("failed to reduce search", failure);
                }
                super.onFailure(failure);
            }
        });
    }
}
@@ -0,0 +1,157 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.search;

import com.carrotsearch.hppc.IntArrayList;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.FetchSearchResult;
import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.search.query.QuerySearchResultProvider;
import org.elasticsearch.threadpool.ThreadPool;

import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;

class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<QuerySearchResultProvider> {

    final AtomicArray<FetchSearchResult> fetchResults;
    final AtomicArray<IntArrayList> docIdsToLoad;

    SearchQueryThenFetchAsyncAction(ESLogger logger, SearchTransportService searchService,
                                    ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
                                    SearchPhaseController searchPhaseController, ThreadPool threadPool,
                                    SearchRequest request, ActionListener<SearchResponse> listener) {
        super(logger, searchService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool, request, listener);
        fetchResults = new AtomicArray<>(firstResults.length());
        docIdsToLoad = new AtomicArray<>(firstResults.length());
    }

    @Override
    protected String firstPhaseName() {
        return "query";
    }

    @Override
    protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request,
                                         ActionListener<QuerySearchResultProvider> listener) {
        searchTransportService.sendExecuteQuery(node, request, listener);
    }

    @Override
    protected void moveToSecondPhase() throws Exception {
        boolean useScroll = request.scroll() != null;
        sortedShardList = searchPhaseController.sortDocs(useScroll, firstResults);
        searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardList);

        if (docIdsToLoad.asList().isEmpty()) {
            finishHim();
            return;
        }

        final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(
            request, sortedShardList, firstResults.length()
        );
        final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size());
        for (AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
            QuerySearchResultProvider queryResult = firstResults.get(entry.index);
            DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId());
            ShardFetchSearchRequest fetchSearchRequest = createFetchRequest(queryResult.queryResult(), entry, lastEmittedDocPerShard);
            executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node);
        }
    }

    void executeFetch(final int shardIndex, final SearchShardTarget shardTarget, final AtomicInteger counter,
                      final ShardFetchSearchRequest fetchSearchRequest, DiscoveryNode node) {
        searchTransportService.sendExecuteFetch(node, fetchSearchRequest, new ActionListener<FetchSearchResult>() {
            @Override
            public void onResponse(FetchSearchResult result) {
                result.shardTarget(shardTarget);
                fetchResults.set(shardIndex, result);
                if (counter.decrementAndGet() == 0) {
                    finishHim();
                }
            }

            @Override
            public void onFailure(Throwable t) {
                // the search context might not be cleared on the node where the fetch was executed, for example
                // because the action was rejected by the thread pool. in this case we need to send a dedicated
                // request to clear the search context. by setting docIdsToLoad to null, the context will be cleared
                // in TransportSearchTypeAction.releaseIrrelevantSearchContexts() after the search request is done.
                docIdsToLoad.set(shardIndex, null);
                onFetchFailure(t, fetchSearchRequest, shardIndex, shardTarget, counter);
            }
        });
    }

    void onFetchFailure(Throwable t, ShardFetchSearchRequest fetchSearchRequest, int shardIndex, SearchShardTarget shardTarget,
                        AtomicInteger counter) {
        if (logger.isDebugEnabled()) {
            logger.debug("[{}] Failed to execute fetch phase", t, fetchSearchRequest.id());
        }
        this.addShardFailure(shardIndex, shardTarget, t);
        successfulOps.decrementAndGet();
        if (counter.decrementAndGet() == 0) {
            finishHim();
        }
    }

    private void finishHim() {
        threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable<SearchResponse>(listener) {
            @Override
            public void doRun() throws IOException {
                final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, firstResults,
                    fetchResults);
                String scrollId = null;
                if (request.scroll() != null) {
                    scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
                }
                listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps,
                    successfulOps.get(), buildTookInMillis(), buildShardFailures()));
                releaseIrrelevantSearchContexts(firstResults, docIdsToLoad);
            }

            @Override
            public void onFailure(Throwable t) {
                try {
                    ReduceSearchPhaseException failure = new ReduceSearchPhaseException("fetch", "", t, buildShardFailures());
                    if (logger.isDebugEnabled()) {
                        logger.debug("failed to reduce search", failure);
                    }
                    super.onFailure(failure);
                } finally {
                    releaseIrrelevantSearchContexts(firstResults, docIdsToLoad);
                }
            }
        });
    }
}
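What separates query-then-fetch from the query-and-fetch variants above is visible in moveToSecondPhase: after the per-shard top docs are merged, only shards that actually placed documents in the global top-N get an entry in docIdsToLoad, and only those shards receive a fetch request. A simplified stand-in for that selection (the real work happens in SearchPhaseController.fillDocIdsToLoad; the data here is illustrative):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    class DocIdsToLoadDemo {
        public static void main(String[] args) {
            // global top-3 after the merge, as (shardIndex, docId) pairs
            int[][] globalTopDocs = { {0, 11}, {0, 7}, {2, 3} };
            Map<Integer, List<Integer>> docIdsToLoad = new HashMap<>();
            for (int[] scoreDoc : globalTopDocs) {
                docIdsToLoad.computeIfAbsent(scoreDoc[0], k -> new ArrayList<>()).add(scoreDoc[1]);
            }
            // shard 1 contributed nothing: absent from the map, never fetched from
            System.out.println(docIdsToLoad); // {0=[11, 7], 2=[3]}
        }
    }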
@@ -0,0 +1,181 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.search;

import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
import org.elasticsearch.search.fetch.ScrollQueryFetchSearchResult;
import org.elasticsearch.search.internal.InternalScrollSearchRequest;
import org.elasticsearch.search.internal.InternalSearchResponse;

import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

import static org.elasticsearch.action.search.TransportSearchHelper.internalScrollSearchRequest;

class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {

    private final ESLogger logger;
    private final SearchPhaseController searchPhaseController;
    private final SearchTransportService searchTransportService;
    private final SearchScrollRequest request;
    private final ActionListener<SearchResponse> listener;
    private final ParsedScrollId scrollId;
    private final DiscoveryNodes nodes;
    private volatile AtomicArray<ShardSearchFailure> shardFailures;
    private final AtomicArray<QueryFetchSearchResult> queryFetchResults;
    private final AtomicInteger successfulOps;
    private final AtomicInteger counter;

    SearchScrollQueryAndFetchAsyncAction(ESLogger logger, ClusterService clusterService,
                                         SearchTransportService searchTransportService, SearchPhaseController searchPhaseController,
                                         SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
        this.logger = logger;
        this.searchPhaseController = searchPhaseController;
        this.searchTransportService = searchTransportService;
        this.request = request;
        this.listener = listener;
        this.scrollId = scrollId;
        this.nodes = clusterService.state().nodes();
        this.successfulOps = new AtomicInteger(scrollId.getContext().length);
        this.counter = new AtomicInteger(scrollId.getContext().length);

        this.queryFetchResults = new AtomicArray<>(scrollId.getContext().length);
    }

    protected final ShardSearchFailure[] buildShardFailures() {
        if (shardFailures == null) {
            return ShardSearchFailure.EMPTY_ARRAY;
        }
        List<AtomicArray.Entry<ShardSearchFailure>> entries = shardFailures.asList();
        ShardSearchFailure[] failures = new ShardSearchFailure[entries.size()];
        for (int i = 0; i < failures.length; i++) {
            failures[i] = entries.get(i).value;
        }
        return failures;
    }

    // we do our best to return the shard failures, but it's ok if it's not fully concurrently safe
    // we simply try and return as much as possible
    protected final void addShardFailure(final int shardIndex, ShardSearchFailure failure) {
        if (shardFailures == null) {
            shardFailures = new AtomicArray<>(scrollId.getContext().length);
        }
        shardFailures.set(shardIndex, failure);
    }

    public void start() {
        if (scrollId.getContext().length == 0) {
            listener.onFailure(new SearchPhaseExecutionException("query", "no nodes to search on", ShardSearchFailure.EMPTY_ARRAY));
            return;
        }

        ScrollIdForNode[] context = scrollId.getContext();
        for (int i = 0; i < context.length; i++) {
            ScrollIdForNode target = context[i];
            DiscoveryNode node = nodes.get(target.getNode());
            if (node != null) {
                executePhase(i, node, target.getScrollId());
            } else {
                if (logger.isDebugEnabled()) {
                    logger.debug("Node [" + target.getNode() + "] not available for scroll request [" + scrollId.getSource() + "]");
                }
                successfulOps.decrementAndGet();
                if (counter.decrementAndGet() == 0) {
                    finishHim();
                }
            }
        }

        for (ScrollIdForNode target : scrollId.getContext()) {
            DiscoveryNode node = nodes.get(target.getNode());
            if (node == null) {
                if (logger.isDebugEnabled()) {
                    logger.debug("Node [" + target.getNode() + "] not available for scroll request [" + scrollId.getSource() + "]");
                }
                successfulOps.decrementAndGet();
                if (counter.decrementAndGet() == 0) {
                    finishHim();
                }
            }
        }
    }

    void executePhase(final int shardIndex, DiscoveryNode node, final long searchId) {
        InternalScrollSearchRequest internalRequest = internalScrollSearchRequest(searchId, request);
        searchTransportService.sendExecuteFetch(node, internalRequest, new ActionListener<ScrollQueryFetchSearchResult>() {
            @Override
            public void onResponse(ScrollQueryFetchSearchResult result) {
                queryFetchResults.set(shardIndex, result.result());
                if (counter.decrementAndGet() == 0) {
                    finishHim();
                }
            }

            @Override
            public void onFailure(Throwable t) {
                onPhaseFailure(t, searchId, shardIndex);
            }
        });
    }

    private void onPhaseFailure(Throwable t, long searchId, int shardIndex) {
        if (logger.isDebugEnabled()) {
            logger.debug("[{}] Failed to execute query phase", t, searchId);
        }
        addShardFailure(shardIndex, new ShardSearchFailure(t));
        successfulOps.decrementAndGet();
        if (counter.decrementAndGet() == 0) {
            if (successfulOps.get() == 0) {
                listener.onFailure(new SearchPhaseExecutionException("query_fetch", "all shards failed", t, buildShardFailures()));
            } else {
                finishHim();
            }
        }
    }

    private void finishHim() {
        try {
            innerFinishHim();
        } catch (Throwable e) {
            listener.onFailure(new ReduceSearchPhaseException("fetch", "", e, buildShardFailures()));
        }
    }

    private void innerFinishHim() throws Exception {
        ScoreDoc[] sortedShardList = searchPhaseController.sortDocs(true, queryFetchResults);
        final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults,
            queryFetchResults);
        String scrollId = null;
        if (request.scroll() != null) {
            scrollId = request.scrollId();
        }
        listener.onResponse(new SearchResponse(internalResponse, scrollId, this.scrollId.getContext().length, successfulOps.get(),
            buildTookInMillis(), buildShardFailures()));
    }
}
@ -0,0 +1,226 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.search;
|
||||
|
||||
import com.carrotsearch.hppc.IntArrayList;
|
||||
import org.apache.lucene.search.ScoreDoc;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.util.concurrent.AtomicArray;
|
||||
import org.elasticsearch.search.action.SearchTransportService;
|
||||
import org.elasticsearch.search.controller.SearchPhaseController;
|
||||
import org.elasticsearch.search.fetch.FetchSearchResult;
|
||||
import org.elasticsearch.search.fetch.ShardFetchRequest;
|
||||
import org.elasticsearch.search.internal.InternalScrollSearchRequest;
|
||||
import org.elasticsearch.search.internal.InternalSearchResponse;
|
||||
import org.elasticsearch.search.query.QuerySearchResult;
|
||||
import org.elasticsearch.search.query.ScrollQuerySearchResult;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import static org.elasticsearch.action.search.TransportSearchHelper.internalScrollSearchRequest;
|
||||
|
||||
class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
|
||||
|
||||
private final ESLogger logger;
|
||||
private final SearchTransportService searchTransportService;
|
||||
private final SearchPhaseController searchPhaseController;
|
||||
private final SearchScrollRequest request;
|
||||
private final ActionListener<SearchResponse> listener;
|
||||
private final ParsedScrollId scrollId;
|
||||
private final DiscoveryNodes nodes;
|
||||
private volatile AtomicArray<ShardSearchFailure> shardFailures;
|
||||
final AtomicArray<QuerySearchResult> queryResults;
|
||||
final AtomicArray<FetchSearchResult> fetchResults;
|
||||
private volatile ScoreDoc[] sortedShardList;
|
||||
private final AtomicInteger successfulOps;
|
||||
|
||||
SearchScrollQueryThenFetchAsyncAction(ESLogger logger, ClusterService clusterService,
|
||||
SearchTransportService searchTransportService, SearchPhaseController searchPhaseController,
|
||||
SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
|
||||
this.logger = logger;
|
||||
this.searchTransportService = searchTransportService;
|
||||
this.searchPhaseController = searchPhaseController;
|
||||
this.request = request;
|
||||
this.listener = listener;
|
||||
this.scrollId = scrollId;
|
||||
this.nodes = clusterService.state().nodes();
|
||||
this.successfulOps = new AtomicInteger(scrollId.getContext().length);
|
||||
this.queryResults = new AtomicArray<>(scrollId.getContext().length);
|
||||
this.fetchResults = new AtomicArray<>(scrollId.getContext().length);
|
||||
}
|
||||
|
||||
protected final ShardSearchFailure[] buildShardFailures() {
|
||||
if (shardFailures == null) {
|
||||
return ShardSearchFailure.EMPTY_ARRAY;
|
||||
}
|
||||
List<AtomicArray.Entry<ShardSearchFailure>> entries = shardFailures.asList();
|
||||
ShardSearchFailure[] failures = new ShardSearchFailure[entries.size()];
|
||||
for (int i = 0; i < failures.length; i++) {
|
||||
failures[i] = entries.get(i).value;
|
||||
}
|
||||
return failures;
|
||||
}
|
||||
|
||||
// we do our best to return the shard failures, but its ok if its not fully concurrently safe
|
||||
// we simply try and return as much as possible
|
||||
protected final void addShardFailure(final int shardIndex, ShardSearchFailure failure) {
|
||||
if (shardFailures == null) {
|
||||
shardFailures = new AtomicArray<>(scrollId.getContext().length);
|
||||
}
|
||||
shardFailures.set(shardIndex, failure);
|
||||
}
|
||||
|
||||
public void start() {
|
||||
if (scrollId.getContext().length == 0) {
|
||||
listener.onFailure(new SearchPhaseExecutionException("query", "no nodes to search on", ShardSearchFailure.EMPTY_ARRAY));
|
||||
return;
|
||||
}
|
||||
final AtomicInteger counter = new AtomicInteger(scrollId.getContext().length);
|
||||
|
||||
ScrollIdForNode[] context = scrollId.getContext();
|
||||
for (int i = 0; i < context.length; i++) {
|
||||
ScrollIdForNode target = context[i];
|
||||
DiscoveryNode node = nodes.get(target.getNode());
|
||||
if (node != null) {
|
||||
executeQueryPhase(i, counter, node, target.getScrollId());
|
||||
} else {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("Node [" + target.getNode() + "] not available for scroll request [" + scrollId.getSource() + "]");
|
||||
}
|
||||
successfulOps.decrementAndGet();
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
try {
|
||||
executeFetchPhase();
|
||||
} catch (Throwable e) {
|
||||
listener.onFailure(new SearchPhaseExecutionException("query", "Fetch failed", e, ShardSearchFailure.EMPTY_ARRAY));
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void executeQueryPhase(final int shardIndex, final AtomicInteger counter, DiscoveryNode node, final long searchId) {
|
||||
InternalScrollSearchRequest internalRequest = internalScrollSearchRequest(searchId, request);
|
||||
searchTransportService.sendExecuteQuery(node, internalRequest, new ActionListener<ScrollQuerySearchResult>() {
|
||||
@Override
|
||||
public void onResponse(ScrollQuerySearchResult result) {
|
||||
queryResults.set(shardIndex, result.queryResult());
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
try {
|
||||
executeFetchPhase();
|
||||
} catch (Throwable e) {
|
||||
onFailure(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable t) {
|
||||
onQueryPhaseFailure(shardIndex, counter, searchId, t);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
void onQueryPhaseFailure(final int shardIndex, final AtomicInteger counter, final long searchId, Throwable t) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("[{}] Failed to execute query phase", t, searchId);
|
||||
}
|
||||
addShardFailure(shardIndex, new ShardSearchFailure(t));
|
||||
successfulOps.decrementAndGet();
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
if (successfulOps.get() == 0) {
|
||||
listener.onFailure(new SearchPhaseExecutionException("query", "all shards failed", t, buildShardFailures()));
|
||||
} else {
|
||||
try {
|
||||
executeFetchPhase();
|
||||
} catch (Throwable e) {
|
||||
listener.onFailure(new SearchPhaseExecutionException("query", "Fetch failed", e, ShardSearchFailure.EMPTY_ARRAY));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void executeFetchPhase() throws Exception {
|
||||
sortedShardList = searchPhaseController.sortDocs(true, queryResults);
|
||||
AtomicArray<IntArrayList> docIdsToLoad = new AtomicArray<>(queryResults.length());
|
||||
searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardList);
|
||||
|
||||
if (docIdsToLoad.asList().isEmpty()) {
|
||||
finishHim();
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(sortedShardList, queryResults.length());
|
||||
final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size());
|
||||
for (final AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
|
||||
IntArrayList docIds = entry.value;
|
||||
final QuerySearchResult querySearchResult = queryResults.get(entry.index);
|
||||
ScoreDoc lastEmittedDoc = lastEmittedDocPerShard[entry.index];
|
||||
ShardFetchRequest shardFetchRequest = new ShardFetchRequest(querySearchResult.id(), docIds, lastEmittedDoc);
|
||||
DiscoveryNode node = nodes.get(querySearchResult.shardTarget().nodeId());
|
||||
searchTransportService.sendExecuteFetchScroll(node, shardFetchRequest, new ActionListener<FetchSearchResult>() {
|
||||
@Override
|
||||
public void onResponse(FetchSearchResult result) {
|
||||
result.shardTarget(querySearchResult.shardTarget());
|
||||
fetchResults.set(entry.index, result);
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
finishHim();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable t) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("Failed to execute fetch phase", t);
|
||||
}
|
||||
successfulOps.decrementAndGet();
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
finishHim();
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
private void finishHim() {
|
||||
try {
|
||||
innerFinishHim();
|
||||
} catch (Throwable e) {
|
||||
listener.onFailure(new ReduceSearchPhaseException("fetch", "", e, buildShardFailures()));
|
||||
}
|
||||
}
|
||||
|
||||
private void innerFinishHim() {
|
||||
InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryResults, fetchResults);
|
||||
String scrollId = null;
|
||||
if (request.scroll() != null) {
|
||||
scrollId = request.scrollId();
|
||||
}
|
||||
listener.onResponse(new SearchResponse(internalResponse, scrollId, this.scrollId.getContext().length, successfulOps.get(),
|
||||
buildTookInMillis(), buildShardFailures()));
|
||||
}
|
||||
}
|
|
@ -20,7 +20,6 @@
|
|||
package org.elasticsearch.action.search;
|
||||
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.search.type.ScrollIdForNode;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.HandledTransportAction;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
|
@ -31,7 +30,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
|
|||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.concurrent.CountDown;
|
||||
import org.elasticsearch.search.action.SearchServiceTransportAction;
|
||||
import org.elasticsearch.search.action.SearchTransportService;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportResponse;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
@ -41,22 +40,22 @@ import java.util.List;
|
|||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
|
||||
import static org.elasticsearch.action.search.type.TransportSearchHelper.parseScrollId;
|
||||
import static org.elasticsearch.action.search.TransportSearchHelper.parseScrollId;
|
||||
|
||||
/**
|
||||
*/
|
||||
public class TransportClearScrollAction extends HandledTransportAction<ClearScrollRequest, ClearScrollResponse> {
|
||||
|
||||
private final ClusterService clusterService;
|
||||
private final SearchServiceTransportAction searchServiceTransportAction;
|
||||
private final SearchTransportService searchTransportService;
|
||||
|
||||
@Inject
|
||||
public TransportClearScrollAction(Settings settings, TransportService transportService, ThreadPool threadPool,
|
||||
ClusterService clusterService, SearchServiceTransportAction searchServiceTransportAction,
|
||||
ClusterService clusterService, SearchTransportService searchTransportService,
|
||||
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
|
||||
super(settings, ClearScrollAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, ClearScrollRequest::new);
|
||||
this.clusterService = clusterService;
|
||||
this.searchServiceTransportAction = searchServiceTransportAction;
|
||||
this.searchTransportService = searchTransportService;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -65,10 +64,8 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScro
|
|||
}
|
||||
|
||||
private class Async {
|
||||
|
||||
final DiscoveryNodes nodes;
|
||||
final CountDown expectedOps;
|
||||
final ClearScrollRequest request;
|
||||
final List<ScrollIdForNode[]> contexts = new ArrayList<>();
|
||||
final ActionListener<ClearScrollResponse> listener;
|
||||
final AtomicReference<Throwable> expHolder;
|
||||
|
@ -86,8 +83,6 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScro
|
|||
this.contexts.add(context);
|
||||
}
|
||||
}
|
||||
|
||||
this.request = request;
|
||||
this.listener = listener;
|
||||
this.expHolder = new AtomicReference<>();
|
||||
this.expectedOps = new CountDown(expectedOps);
|
||||
|
@ -101,7 +96,7 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScro
|
|||
|
||||
if (contexts.isEmpty()) {
|
||||
for (final DiscoveryNode node : nodes) {
|
||||
searchServiceTransportAction.sendClearAllScrollContexts(node, request, new ActionListener<TransportResponse>() {
|
||||
searchTransportService.sendClearAllScrollContexts(node, new ActionListener<TransportResponse>() {
|
||||
@Override
|
||||
public void onResponse(TransportResponse response) {
|
||||
onFreedContext(true);
|
||||
|
@ -122,9 +117,9 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScro
|
|||
continue;
|
||||
}
|
||||
|
||||
searchServiceTransportAction.sendFreeContext(node, target.getScrollId(), request, new ActionListener<SearchServiceTransportAction.SearchFreeContextResponse>() {
|
||||
searchTransportService.sendFreeContext(node, target.getScrollId(), new ActionListener<SearchTransportService.SearchFreeContextResponse>() {
|
||||
@Override
|
||||
public void onResponse(SearchServiceTransportAction.SearchFreeContextResponse freed) {
|
||||
public void onResponse(SearchTransportService.SearchFreeContextResponse freed) {
|
||||
onFreedContext(freed.isFreed());
|
||||
}
|
||||
|
||||
|
|
|
@ -20,10 +20,6 @@
|
|||
package org.elasticsearch.action.search;
|
||||
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.search.type.TransportSearchDfsQueryAndFetchAction;
|
||||
import org.elasticsearch.action.search.type.TransportSearchDfsQueryThenFetchAction;
|
||||
import org.elasticsearch.action.search.type.TransportSearchQueryAndFetchAction;
|
||||
import org.elasticsearch.action.search.type.TransportSearchQueryThenFetchAction;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.HandledTransportAction;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
|
@ -33,13 +29,14 @@ import org.elasticsearch.common.inject.Inject;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.IndexNotFoundException;
|
||||
import org.elasticsearch.indices.IndexClosedException;
|
||||
import org.elasticsearch.search.action.SearchTransportService;
|
||||
import org.elasticsearch.search.controller.SearchPhaseController;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import static org.elasticsearch.action.search.SearchType.DFS_QUERY_THEN_FETCH;
|
||||
import static org.elasticsearch.action.search.SearchType.QUERY_AND_FETCH;
|
||||
|
||||
/**
|
||||
|
@ -48,25 +45,18 @@ import static org.elasticsearch.action.search.SearchType.QUERY_AND_FETCH;
|
|||
public class TransportSearchAction extends HandledTransportAction<SearchRequest, SearchResponse> {
|
||||
|
||||
private final ClusterService clusterService;
|
||||
private final TransportSearchDfsQueryThenFetchAction dfsQueryThenFetchAction;
|
||||
private final TransportSearchQueryThenFetchAction queryThenFetchAction;
|
||||
private final TransportSearchDfsQueryAndFetchAction dfsQueryAndFetchAction;
|
||||
private final TransportSearchQueryAndFetchAction queryAndFetchAction;
|
||||
private final SearchTransportService searchTransportService;
|
||||
private final SearchPhaseController searchPhaseController;
|
||||
|
||||
@Inject
|
||||
public TransportSearchAction(Settings settings, ThreadPool threadPool,
|
||||
TransportService transportService, ClusterService clusterService,
|
||||
TransportSearchDfsQueryThenFetchAction dfsQueryThenFetchAction,
|
||||
TransportSearchQueryThenFetchAction queryThenFetchAction,
|
||||
TransportSearchDfsQueryAndFetchAction dfsQueryAndFetchAction,
|
||||
TransportSearchQueryAndFetchAction queryAndFetchAction,
|
||||
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
|
||||
public TransportSearchAction(Settings settings, ThreadPool threadPool, SearchPhaseController searchPhaseController,
|
||||
TransportService transportService, SearchTransportService searchTransportService,
|
||||
ClusterService clusterService, ActionFilters actionFilters, IndexNameExpressionResolver
|
||||
indexNameExpressionResolver) {
|
||||
super(settings, SearchAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, SearchRequest::new);
|
||||
this.searchPhaseController = searchPhaseController;
|
||||
this.searchTransportService = searchTransportService;
|
||||
this.clusterService = clusterService;
|
||||
this.dfsQueryThenFetchAction = dfsQueryThenFetchAction;
|
||||
this.queryThenFetchAction = queryThenFetchAction;
|
||||
this.dfsQueryAndFetchAction = dfsQueryAndFetchAction;
|
||||
this.queryAndFetchAction = queryAndFetchAction;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -75,7 +65,8 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
|
|||
try {
|
||||
ClusterState clusterState = clusterService.state();
|
||||
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, searchRequest);
|
||||
Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, searchRequest.routing(), searchRequest.indices());
|
||||
Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState,
|
||||
searchRequest.routing(), searchRequest.indices());
|
||||
int shardCount = clusterService.operationRouting().searchShardsCount(clusterState, concreteIndices, routingMap);
|
||||
if (shardCount == 1) {
|
||||
// if we only have one group, then we always want Q_A_F, no need for DFS, and no need to do THEN since we hit one shard
|
||||
|
@ -86,16 +77,28 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
|
|||
} catch (Exception e) {
|
||||
logger.debug("failed to optimize search type, continue as normal", e);
|
||||
}
|
||||
if (searchRequest.searchType() == DFS_QUERY_THEN_FETCH) {
|
||||
dfsQueryThenFetchAction.execute(searchRequest, listener);
|
||||
} else if (searchRequest.searchType() == SearchType.QUERY_THEN_FETCH) {
|
||||
queryThenFetchAction.execute(searchRequest, listener);
|
||||
} else if (searchRequest.searchType() == SearchType.DFS_QUERY_AND_FETCH) {
|
||||
dfsQueryAndFetchAction.execute(searchRequest, listener);
|
||||
} else if (searchRequest.searchType() == SearchType.QUERY_AND_FETCH) {
|
||||
queryAndFetchAction.execute(searchRequest, listener);
|
||||
} else {
|
||||
throw new IllegalStateException("Unknown search type: [" + searchRequest.searchType() + "]");
|
||||
|
||||
AbstractSearchAsyncAction searchAsyncAction;
|
||||
switch(searchRequest.searchType()) {
|
||||
case DFS_QUERY_THEN_FETCH:
|
||||
searchAsyncAction = new SearchDfsQueryThenFetchAsyncAction(logger, searchTransportService, clusterService,
|
||||
indexNameExpressionResolver, searchPhaseController, threadPool, searchRequest, listener);
|
||||
break;
|
||||
case QUERY_THEN_FETCH:
|
||||
searchAsyncAction = new SearchQueryThenFetchAsyncAction(logger, searchTransportService, clusterService,
|
||||
indexNameExpressionResolver, searchPhaseController, threadPool, searchRequest, listener);
|
||||
break;
|
||||
case DFS_QUERY_AND_FETCH:
|
||||
searchAsyncAction = new SearchDfsQueryAndFetchAsyncAction(logger, searchTransportService, clusterService,
|
||||
indexNameExpressionResolver, searchPhaseController, threadPool, searchRequest, listener);
|
||||
break;
|
||||
case QUERY_AND_FETCH:
|
||||
searchAsyncAction = new SearchQueryAndFetchAsyncAction(logger, searchTransportService, clusterService,
|
||||
indexNameExpressionResolver, searchPhaseController, threadPool, searchRequest, listener);
|
||||
break;
|
||||
default:
|
||||
throw new IllegalStateException("Unknown search type: [" + searchRequest.searchType() + "]");
|
||||
}
|
||||
searchAsyncAction.start();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -17,13 +17,10 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.search.type;
|
||||
package org.elasticsearch.action.search;
|
||||
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.CharsRefBuilder;
|
||||
import org.elasticsearch.action.search.SearchRequest;
|
||||
import org.elasticsearch.action.search.SearchScrollRequest;
|
||||
import org.elasticsearch.action.search.SearchType;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.common.Base64;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
|
@ -42,17 +39,19 @@ import static java.util.Collections.emptyMap;
|
|||
/**
|
||||
*
|
||||
*/
|
||||
public abstract class TransportSearchHelper {
|
||||
final class TransportSearchHelper {
|
||||
|
||||
public static ShardSearchTransportRequest internalSearchRequest(ShardRouting shardRouting, int numberOfShards, SearchRequest request, String[] filteringAliases, long nowInMillis) {
|
||||
static ShardSearchTransportRequest internalSearchRequest(ShardRouting shardRouting, int numberOfShards, SearchRequest request,
|
||||
String[] filteringAliases, long nowInMillis) {
|
||||
return new ShardSearchTransportRequest(request, shardRouting, numberOfShards, filteringAliases, nowInMillis);
|
||||
}
|
||||
|
||||
public static InternalScrollSearchRequest internalScrollSearchRequest(long id, SearchScrollRequest request) {
|
||||
static InternalScrollSearchRequest internalScrollSearchRequest(long id, SearchScrollRequest request) {
|
||||
return new InternalScrollSearchRequest(request, id);
|
||||
}
|
||||
|
||||
public static String buildScrollId(SearchType searchType, AtomicArray<? extends SearchPhaseResult> searchPhaseResults, @Nullable Map<String, String> attributes) throws IOException {
|
||||
static String buildScrollId(SearchType searchType, AtomicArray<? extends SearchPhaseResult> searchPhaseResults,
|
||||
@Nullable Map<String, String> attributes) throws IOException {
|
||||
if (searchType == SearchType.DFS_QUERY_THEN_FETCH || searchType == SearchType.QUERY_THEN_FETCH) {
|
||||
return buildScrollId(ParsedScrollId.QUERY_THEN_FETCH_TYPE, searchPhaseResults, attributes);
|
||||
} else if (searchType == SearchType.QUERY_AND_FETCH || searchType == SearchType.DFS_QUERY_AND_FETCH) {
|
||||
|
@ -62,7 +61,8 @@ public abstract class TransportSearchHelper {
|
|||
}
|
||||
}
|
||||
|
||||
public static String buildScrollId(String type, AtomicArray<? extends SearchPhaseResult> searchPhaseResults, @Nullable Map<String, String> attributes) throws IOException {
|
||||
static String buildScrollId(String type, AtomicArray<? extends SearchPhaseResult> searchPhaseResults,
|
||||
@Nullable Map<String, String> attributes) throws IOException {
|
||||
StringBuilder sb = new StringBuilder().append(type).append(';');
|
||||
sb.append(searchPhaseResults.asList().size()).append(';');
|
||||
for (AtomicArray.Entry<? extends SearchPhaseResult> entry : searchPhaseResults.asList()) {
|
||||
|
@ -81,7 +81,7 @@ public abstract class TransportSearchHelper {
|
|||
return Base64.encodeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length, Base64.URL_SAFE);
|
||||
}
|
||||
|
||||
public static ParsedScrollId parseScrollId(String scrollId) {
|
||||
static ParsedScrollId parseScrollId(String scrollId) {
|
||||
CharsRefBuilder spare = new CharsRefBuilder();
|
||||
try {
|
||||
byte[] decode = Base64.decode(scrollId, Base64.URL_SAFE);
|
||||
|
@ -128,5 +128,4 @@ public abstract class TransportSearchHelper {
|
|||
private TransportSearchHelper() {
|
||||
|
||||
}
|
||||
|
||||
}
|
|
@ -20,51 +20,60 @@
|
|||
package org.elasticsearch.action.search;
|
||||
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.search.type.ParsedScrollId;
|
||||
import org.elasticsearch.action.search.type.TransportSearchScrollQueryAndFetchAction;
|
||||
import org.elasticsearch.action.search.type.TransportSearchScrollQueryThenFetchAction;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.HandledTransportAction;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.search.action.SearchTransportService;
|
||||
import org.elasticsearch.search.controller.SearchPhaseController;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import static org.elasticsearch.action.search.type.ParsedScrollId.QUERY_AND_FETCH_TYPE;
|
||||
import static org.elasticsearch.action.search.type.ParsedScrollId.QUERY_THEN_FETCH_TYPE;
|
||||
import static org.elasticsearch.action.search.type.TransportSearchHelper.parseScrollId;
|
||||
import static org.elasticsearch.action.search.ParsedScrollId.QUERY_AND_FETCH_TYPE;
|
||||
import static org.elasticsearch.action.search.ParsedScrollId.QUERY_THEN_FETCH_TYPE;
|
||||
import static org.elasticsearch.action.search.TransportSearchHelper.parseScrollId;
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
public class TransportSearchScrollAction extends HandledTransportAction<SearchScrollRequest, SearchResponse> {
|
||||
|
||||
private final TransportSearchScrollQueryThenFetchAction queryThenFetchAction;
|
||||
private final TransportSearchScrollQueryAndFetchAction queryAndFetchAction;
|
||||
private final ClusterService clusterService;
|
||||
private final SearchTransportService searchTransportService;
|
||||
private final SearchPhaseController searchPhaseController;
|
||||
|
||||
@Inject
|
||||
public TransportSearchScrollAction(Settings settings, ThreadPool threadPool, TransportService transportService,
|
||||
TransportSearchScrollQueryThenFetchAction queryThenFetchAction,
|
||||
TransportSearchScrollQueryAndFetchAction queryAndFetchAction,
|
||||
ActionFilters actionFilters,
|
||||
IndexNameExpressionResolver indexNameExpressionResolver) {
|
||||
super(settings, SearchScrollAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, SearchScrollRequest::new);
|
||||
this.queryThenFetchAction = queryThenFetchAction;
|
||||
this.queryAndFetchAction = queryAndFetchAction;
|
||||
ClusterService clusterService, SearchTransportService searchTransportService,
|
||||
SearchPhaseController searchPhaseController,
|
||||
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
|
||||
super(settings, SearchScrollAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
|
||||
SearchScrollRequest::new);
|
||||
this.clusterService = clusterService;
|
||||
this.searchTransportService = searchTransportService;
|
||||
this.searchPhaseController = searchPhaseController;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doExecute(SearchScrollRequest request, ActionListener<SearchResponse> listener) {
|
||||
try {
|
||||
ParsedScrollId scrollId = parseScrollId(request.scrollId());
|
||||
if (scrollId.getType().equals(QUERY_THEN_FETCH_TYPE)) {
|
||||
queryThenFetchAction.execute(request, scrollId, listener);
|
||||
} else if (scrollId.getType().equals(QUERY_AND_FETCH_TYPE)) {
|
||||
queryAndFetchAction.execute(request, scrollId, listener);
|
||||
} else {
|
||||
throw new IllegalArgumentException("Scroll id type [" + scrollId.getType() + "] unrecognized");
|
||||
AbstractAsyncAction action;
|
||||
switch (scrollId.getType()) {
|
||||
case QUERY_THEN_FETCH_TYPE:
|
||||
action = new SearchScrollQueryThenFetchAsyncAction(logger, clusterService, searchTransportService,
|
||||
searchPhaseController, request, scrollId, listener);
|
||||
break;
|
||||
case QUERY_AND_FETCH_TYPE:
|
||||
action = new SearchScrollQueryAndFetchAsyncAction(logger, clusterService, searchTransportService,
|
||||
searchPhaseController, request, scrollId, listener);
|
||||
break;
|
||||
default:
|
||||
throw new IllegalArgumentException("Scroll id type [" + scrollId.getType() + "] unrecognized");
|
||||
}
|
||||
action.start();
|
||||
} catch (Throwable e) {
|
||||
listener.onFailure(e);
|
||||
}
|
||||
|
|
|
@ -1,158 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.search.type;
|
||||
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.ActionRunnable;
|
||||
import org.elasticsearch.action.search.ReduceSearchPhaseException;
|
||||
import org.elasticsearch.action.search.SearchRequest;
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.concurrent.AtomicArray;
|
||||
import org.elasticsearch.search.action.SearchServiceTransportAction;
|
||||
import org.elasticsearch.search.controller.SearchPhaseController;
|
||||
import org.elasticsearch.search.dfs.AggregatedDfs;
|
||||
import org.elasticsearch.search.dfs.DfsSearchResult;
|
||||
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
|
||||
import org.elasticsearch.search.internal.InternalSearchResponse;
|
||||
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
|
||||
import org.elasticsearch.search.query.QuerySearchRequest;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
public class TransportSearchDfsQueryAndFetchAction extends TransportSearchTypeAction {
|
||||
|
||||
@Inject
|
||||
public TransportSearchDfsQueryAndFetchAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
|
||||
SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController,
|
||||
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
|
||||
super(settings, threadPool, clusterService, searchService, searchPhaseController, actionFilters, indexNameExpressionResolver);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doExecute(SearchRequest searchRequest, ActionListener<SearchResponse> listener) {
|
||||
new AsyncAction(searchRequest, listener).start();
|
||||
}
|
||||
|
||||
private class AsyncAction extends BaseAsyncAction<DfsSearchResult> {
|
||||
|
||||
private final AtomicArray<QueryFetchSearchResult> queryFetchResults;
|
||||
|
||||
private AsyncAction(SearchRequest request, ActionListener<SearchResponse> listener) {
|
||||
super(request, listener);
|
||||
queryFetchResults = new AtomicArray<>(firstResults.length());
|
||||
}
|
||||
|
||||
@Override
|
||||
protected String firstPhaseName() {
|
||||
return "dfs";
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, ActionListener<DfsSearchResult> listener) {
|
||||
searchService.sendExecuteDfs(node, request, listener);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void moveToSecondPhase() {
|
||||
final AggregatedDfs dfs = searchPhaseController.aggregateDfs(firstResults);
|
||||
final AtomicInteger counter = new AtomicInteger(firstResults.asList().size());
|
||||
|
||||
for (final AtomicArray.Entry<DfsSearchResult> entry : firstResults.asList()) {
|
||||
DfsSearchResult dfsResult = entry.value;
|
||||
DiscoveryNode node = nodes.get(dfsResult.shardTarget().nodeId());
|
||||
QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs);
|
||||
executeSecondPhase(entry.index, dfsResult, counter, node, querySearchRequest);
|
||||
}
|
||||
}
|
||||
|
||||
void executeSecondPhase(final int shardIndex, final DfsSearchResult dfsResult, final AtomicInteger counter, final DiscoveryNode node, final QuerySearchRequest querySearchRequest) {
|
||||
searchService.sendExecuteFetch(node, querySearchRequest, new ActionListener<QueryFetchSearchResult>() {
|
||||
@Override
|
||||
public void onResponse(QueryFetchSearchResult result) {
|
||||
result.shardTarget(dfsResult.shardTarget());
|
||||
queryFetchResults.set(shardIndex, result);
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
finishHim();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable t) {
|
||||
try {
|
||||
onSecondPhaseFailure(t, querySearchRequest, shardIndex, dfsResult, counter);
|
||||
} finally {
|
||||
// the query might not have been executed at all (for example because thread pool rejected execution)
|
||||
// and the search context that was created in dfs phase might not be released.
|
||||
// release it again to be in the safe side
|
||||
sendReleaseSearchContext(querySearchRequest.id(), node);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
void onSecondPhaseFailure(Throwable t, QuerySearchRequest querySearchRequest, int shardIndex, DfsSearchResult dfsResult, AtomicInteger counter) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("[{}] Failed to execute query phase", t, querySearchRequest.id());
|
||||
}
|
||||
this.addShardFailure(shardIndex, dfsResult.shardTarget(), t);
|
||||
successfulOps.decrementAndGet();
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
finishHim();
|
||||
}
|
||||
}
|
||||
|
||||
private void finishHim() {
|
||||
threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable<SearchResponse>(listener) {
|
||||
@Override
|
||||
public void doRun() throws IOException {
|
||||
sortedShardList = searchPhaseController.sortDocs(true, queryFetchResults);
|
||||
final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults,
|
||||
queryFetchResults);
|
||||
String scrollId = null;
|
||||
if (request.scroll() != null) {
|
||||
scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
|
||||
}
|
||||
listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(), buildTookInMillis(), buildShardFailures()));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable t) {
|
||||
ReduceSearchPhaseException failure = new ReduceSearchPhaseException("query_fetch", "", t, buildShardFailures());
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("failed to reduce search", failure);
|
||||
}
|
||||
super.onFailure(t);
|
||||
}
|
||||
});
|
||||
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,239 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.search.type;
|
||||
|
||||
import com.carrotsearch.hppc.IntArrayList;
|
||||
import org.apache.lucene.search.ScoreDoc;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.ActionRunnable;
|
||||
import org.elasticsearch.action.search.ReduceSearchPhaseException;
|
||||
import org.elasticsearch.action.search.SearchPhaseExecutionException;
|
||||
import org.elasticsearch.action.search.SearchRequest;
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.concurrent.AtomicArray;
|
||||
import org.elasticsearch.search.SearchShardTarget;
|
||||
import org.elasticsearch.search.action.SearchServiceTransportAction;
|
||||
import org.elasticsearch.search.controller.SearchPhaseController;
|
||||
import org.elasticsearch.search.dfs.AggregatedDfs;
|
||||
import org.elasticsearch.search.dfs.DfsSearchResult;
|
||||
import org.elasticsearch.search.fetch.FetchSearchResult;
|
||||
import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
|
||||
import org.elasticsearch.search.internal.InternalSearchResponse;
|
||||
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
|
||||
import org.elasticsearch.search.query.QuerySearchRequest;
|
||||
import org.elasticsearch.search.query.QuerySearchResult;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
public class TransportSearchDfsQueryThenFetchAction extends TransportSearchTypeAction {
|
||||
|
||||
@Inject
|
||||
public TransportSearchDfsQueryThenFetchAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
|
||||
SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController,
|
||||
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
|
||||
super(settings, threadPool, clusterService, searchService, searchPhaseController, actionFilters, indexNameExpressionResolver);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doExecute(SearchRequest searchRequest, ActionListener<SearchResponse> listener) {
|
||||
new AsyncAction(searchRequest, listener).start();
|
||||
}
|
||||
|
||||
private class AsyncAction extends BaseAsyncAction<DfsSearchResult> {
|
||||
|
||||
final AtomicArray<QuerySearchResult> queryResults;
|
||||
final AtomicArray<FetchSearchResult> fetchResults;
|
||||
final AtomicArray<IntArrayList> docIdsToLoad;
|
||||
|
||||
private AsyncAction(SearchRequest request, ActionListener<SearchResponse> listener) {
|
||||
super(request, listener);
|
||||
queryResults = new AtomicArray<>(firstResults.length());
|
||||
fetchResults = new AtomicArray<>(firstResults.length());
|
||||
docIdsToLoad = new AtomicArray<>(firstResults.length());
|
||||
}
|
||||
|
||||
@Override
|
||||
protected String firstPhaseName() {
|
||||
return "dfs";
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, ActionListener<DfsSearchResult> listener) {
|
||||
searchService.sendExecuteDfs(node, request, listener);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void moveToSecondPhase() {
|
||||
final AggregatedDfs dfs = searchPhaseController.aggregateDfs(firstResults);
|
||||
final AtomicInteger counter = new AtomicInteger(firstResults.asList().size());
|
||||
for (final AtomicArray.Entry<DfsSearchResult> entry : firstResults.asList()) {
|
||||
DfsSearchResult dfsResult = entry.value;
|
||||
DiscoveryNode node = nodes.get(dfsResult.shardTarget().nodeId());
|
||||
QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs);
|
||||
executeQuery(entry.index, dfsResult, counter, querySearchRequest, node);
|
||||
}
|
||||
}
|
||||
|
||||
void executeQuery(final int shardIndex, final DfsSearchResult dfsResult, final AtomicInteger counter, final QuerySearchRequest querySearchRequest, final DiscoveryNode node) {
|
||||
searchService.sendExecuteQuery(node, querySearchRequest, new ActionListener<QuerySearchResult>() {
|
||||
@Override
|
||||
public void onResponse(QuerySearchResult result) {
|
||||
result.shardTarget(dfsResult.shardTarget());
|
||||
queryResults.set(shardIndex, result);
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
executeFetchPhase();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable t) {
|
||||
try {
|
||||
onQueryFailure(t, querySearchRequest, shardIndex, dfsResult, counter);
|
||||
} finally {
|
||||
// the query might not have been executed at all (for example because thread pool rejected execution)
|
||||
// and the search context that was created in dfs phase might not be released.
|
||||
// release it again to be in the safe side
|
||||
sendReleaseSearchContext(querySearchRequest.id(), node);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
void onQueryFailure(Throwable t, QuerySearchRequest querySearchRequest, int shardIndex, DfsSearchResult dfsResult, AtomicInteger counter) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("[{}] Failed to execute query phase", t, querySearchRequest.id());
|
||||
}
|
||||
this.addShardFailure(shardIndex, dfsResult.shardTarget(), t);
|
||||
successfulOps.decrementAndGet();
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
if (successfulOps.get() == 0) {
|
||||
listener.onFailure(new SearchPhaseExecutionException("query", "all shards failed", buildShardFailures()));
|
||||
} else {
|
||||
executeFetchPhase();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void executeFetchPhase() {
|
||||
try {
|
||||
innerExecuteFetchPhase();
|
||||
} catch (Throwable e) {
|
||||
listener.onFailure(new ReduceSearchPhaseException("query", "", e, buildShardFailures()));
|
||||
}
|
||||
}
|
||||
|
||||
void innerExecuteFetchPhase() throws Exception {
|
||||
boolean useScroll = request.scroll() != null;
|
||||
sortedShardList = searchPhaseController.sortDocs(useScroll, queryResults);
|
||||
searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardList);
|
||||
|
||||
if (docIdsToLoad.asList().isEmpty()) {
|
||||
finishHim();
|
||||
return;
|
||||
}
|
||||
|
||||
final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(
|
||||
request, sortedShardList, firstResults.length()
|
||||
);
|
||||
final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size());
|
||||
for (final AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
|
||||
QuerySearchResult queryResult = queryResults.get(entry.index);
|
||||
DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId());
|
||||
ShardFetchSearchRequest fetchSearchRequest = createFetchRequest(queryResult, entry, lastEmittedDocPerShard);
|
||||
executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node);
|
||||
}
|
||||
}
|
||||
|
||||
void executeFetch(final int shardIndex, final SearchShardTarget shardTarget, final AtomicInteger counter, final ShardFetchSearchRequest fetchSearchRequest, DiscoveryNode node) {
|
||||
searchService.sendExecuteFetch(node, fetchSearchRequest, new ActionListener<FetchSearchResult>() {
|
||||
@Override
|
||||
public void onResponse(FetchSearchResult result) {
|
||||
result.shardTarget(shardTarget);
|
||||
fetchResults.set(shardIndex, result);
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
finishHim();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable t) {
|
||||
// the search context might not be cleared on the node where the fetch was executed for example
|
||||
// because the action was rejected by the thread pool. in this case we need to send a dedicated
|
||||
// request to clear the search context. by setting docIdsToLoad to null, the context will be cleared
|
||||
// in TransportSearchTypeAction.releaseIrrelevantSearchContexts() after the search request is done.
|
||||
docIdsToLoad.set(shardIndex, null);
|
||||
onFetchFailure(t, fetchSearchRequest, shardIndex, shardTarget, counter);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
void onFetchFailure(Throwable t, ShardFetchSearchRequest fetchSearchRequest, int shardIndex, SearchShardTarget shardTarget, AtomicInteger counter) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("[{}] Failed to execute fetch phase", t, fetchSearchRequest.id());
|
||||
}
|
||||
this.addShardFailure(shardIndex, shardTarget, t);
|
||||
successfulOps.decrementAndGet();
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
finishHim();
|
||||
}
|
||||
}
|
||||
|
||||
private void finishHim() {
|
||||
threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable<SearchResponse>(listener) {
|
||||
@Override
|
||||
public void doRun() throws IOException {
|
||||
final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryResults,
|
||||
fetchResults);
|
||||
String scrollId = null;
|
||||
if (request.scroll() != null) {
|
||||
scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
|
||||
}
|
||||
listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(), buildTookInMillis(), buildShardFailures()));
|
||||
releaseIrrelevantSearchContexts(queryResults, docIdsToLoad);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable t) {
|
||||
try {
|
||||
ReduceSearchPhaseException failure = new ReduceSearchPhaseException("merge", "", t, buildShardFailures());
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("failed to reduce search", failure);
|
||||
}
|
||||
super.onFailure(failure);
|
||||
} finally {
|
||||
releaseIrrelevantSearchContexts(queryResults, docIdsToLoad);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,104 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.search.type;
|
||||
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.ActionRunnable;
|
||||
import org.elasticsearch.action.search.ReduceSearchPhaseException;
|
||||
import org.elasticsearch.action.search.SearchRequest;
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.search.action.SearchServiceTransportAction;
|
||||
import org.elasticsearch.search.controller.SearchPhaseController;
|
||||
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
|
||||
import org.elasticsearch.search.internal.InternalSearchResponse;
|
||||
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import static org.elasticsearch.action.search.type.TransportSearchHelper.buildScrollId;
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
public class TransportSearchQueryAndFetchAction extends TransportSearchTypeAction {
|
||||
|
||||
@Inject
|
||||
public TransportSearchQueryAndFetchAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
|
||||
SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController,
|
||||
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
|
||||
super(settings, threadPool, clusterService, searchService, searchPhaseController, actionFilters, indexNameExpressionResolver);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doExecute(SearchRequest searchRequest, ActionListener<SearchResponse> listener) {
|
||||
new AsyncAction(searchRequest, listener).start();
|
||||
}
|
||||
|
||||
private class AsyncAction extends BaseAsyncAction<QueryFetchSearchResult> {
|
||||
|
||||
private AsyncAction(SearchRequest request, ActionListener<SearchResponse> listener) {
|
||||
super(request, listener);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected String firstPhaseName() {
|
||||
return "query_fetch";
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, ActionListener<QueryFetchSearchResult> listener) {
|
||||
searchService.sendExecuteFetch(node, request, listener);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void moveToSecondPhase() throws Exception {
|
||||
threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable<SearchResponse>(listener) {
|
||||
@Override
|
||||
public void doRun() throws IOException {
|
||||
boolean useScroll = request.scroll() != null;
|
||||
sortedShardList = searchPhaseController.sortDocs(useScroll, firstResults);
|
||||
final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, firstResults,
|
||||
firstResults);
|
||||
String scrollId = null;
|
||||
if (request.scroll() != null) {
|
||||
scrollId = buildScrollId(request.searchType(), firstResults, null);
|
||||
}
|
||||
listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(), buildTookInMillis(), buildShardFailures()));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable t) {
|
||||
ReduceSearchPhaseException failure = new ReduceSearchPhaseException("merge", "", t, buildShardFailures());
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("failed to reduce search", failure);
|
||||
}
|
||||
super.onFailure(failure);
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,173 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.search.type;

import com.carrotsearch.hppc.IntArrayList;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.action.search.ReduceSearchPhaseException;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.action.SearchServiceTransportAction;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.FetchSearchResult;
import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.search.query.QuerySearchResultProvider;
import org.elasticsearch.threadpool.ThreadPool;

import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;

/**
 *
 */
public class TransportSearchQueryThenFetchAction extends TransportSearchTypeAction {

    @Inject
    public TransportSearchQueryThenFetchAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
                                               SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController,
                                               ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, threadPool, clusterService, searchService, searchPhaseController, actionFilters, indexNameExpressionResolver);
    }

    @Override
    protected void doExecute(SearchRequest searchRequest, ActionListener<SearchResponse> listener) {
        new AsyncAction(searchRequest, listener).start();
    }

    private class AsyncAction extends BaseAsyncAction<QuerySearchResultProvider> {

        final AtomicArray<FetchSearchResult> fetchResults;
        final AtomicArray<IntArrayList> docIdsToLoad;

        private AsyncAction(SearchRequest request, ActionListener<SearchResponse> listener) {
            super(request, listener);
            fetchResults = new AtomicArray<>(firstResults.length());
            docIdsToLoad = new AtomicArray<>(firstResults.length());
        }

        @Override
        protected String firstPhaseName() {
            return "query";
        }

        @Override
        protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, ActionListener<QuerySearchResultProvider> listener) {
            searchService.sendExecuteQuery(node, request, listener);
        }

        @Override
        protected void moveToSecondPhase() throws Exception {
            boolean useScroll = request.scroll() != null;
            sortedShardList = searchPhaseController.sortDocs(useScroll, firstResults);
            searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardList);

            if (docIdsToLoad.asList().isEmpty()) {
                finishHim();
                return;
            }

            final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(
                request, sortedShardList, firstResults.length()
            );
            final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size());
            for (AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
                QuerySearchResultProvider queryResult = firstResults.get(entry.index);
                DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId());
                ShardFetchSearchRequest fetchSearchRequest = createFetchRequest(queryResult.queryResult(), entry, lastEmittedDocPerShard);
                executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node);
            }
        }

        void executeFetch(final int shardIndex, final SearchShardTarget shardTarget, final AtomicInteger counter, final ShardFetchSearchRequest fetchSearchRequest, DiscoveryNode node) {
            searchService.sendExecuteFetch(node, fetchSearchRequest, new ActionListener<FetchSearchResult>() {
                @Override
                public void onResponse(FetchSearchResult result) {
                    result.shardTarget(shardTarget);
                    fetchResults.set(shardIndex, result);
                    if (counter.decrementAndGet() == 0) {
                        finishHim();
                    }
                }

                @Override
                public void onFailure(Throwable t) {
                    // the search context might not be cleared on the node where the fetch was executed for example
                    // because the action was rejected by the thread pool. in this case we need to send a dedicated
                    // request to clear the search context. by setting docIdsToLoad to null, the context will be cleared
                    // in TransportSearchTypeAction.releaseIrrelevantSearchContexts() after the search request is done.
                    docIdsToLoad.set(shardIndex, null);
                    onFetchFailure(t, fetchSearchRequest, shardIndex, shardTarget, counter);
                }
            });
        }

        void onFetchFailure(Throwable t, ShardFetchSearchRequest fetchSearchRequest, int shardIndex, SearchShardTarget shardTarget, AtomicInteger counter) {
            if (logger.isDebugEnabled()) {
                logger.debug("[{}] Failed to execute fetch phase", t, fetchSearchRequest.id());
            }
            this.addShardFailure(shardIndex, shardTarget, t);
            successfulOps.decrementAndGet();
            if (counter.decrementAndGet() == 0) {
                finishHim();
            }
        }

        private void finishHim() {
            threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable<SearchResponse>(listener) {
                @Override
                public void doRun() throws IOException {
                    final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, firstResults,
                        fetchResults);
                    String scrollId = null;
                    if (request.scroll() != null) {
                        scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
                    }
                    listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(), buildTookInMillis(), buildShardFailures()));
                    releaseIrrelevantSearchContexts(firstResults, docIdsToLoad);
                }

                @Override
                public void onFailure(Throwable t) {
                    try {
                        ReduceSearchPhaseException failure = new ReduceSearchPhaseException("fetch", "", t, buildShardFailures());
                        if (logger.isDebugEnabled()) {
                            logger.debug("failed to reduce search", failure);
                        }
                        super.onFailure(failure);
                    } finally {
                        releaseIrrelevantSearchContexts(firstResults, docIdsToLoad);
                    }
                }
            });
        }
    }
}
@ -1,205 +0,0 @@
package org.elasticsearch.action.search.type;

import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.search.ReduceSearchPhaseException;
import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchServiceTransportAction;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
import org.elasticsearch.search.fetch.ScrollQueryFetchSearchResult;
import org.elasticsearch.search.internal.InternalScrollSearchRequest;
import org.elasticsearch.search.internal.InternalSearchResponse;

import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

import static org.elasticsearch.action.search.type.TransportSearchHelper.internalScrollSearchRequest;

/**
 *
 */
public class TransportSearchScrollQueryAndFetchAction extends AbstractComponent {

    private final ClusterService clusterService;
    private final SearchServiceTransportAction searchService;
    private final SearchPhaseController searchPhaseController;

    @Inject
    public TransportSearchScrollQueryAndFetchAction(Settings settings, ClusterService clusterService,
                                                    SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController) {
        super(settings);
        this.clusterService = clusterService;
        this.searchService = searchService;
        this.searchPhaseController = searchPhaseController;
    }

    public void execute(SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
        new AsyncAction(request, scrollId, listener).start();
    }

    private class AsyncAction extends AbstractAsyncAction {

        private final SearchScrollRequest request;
        private final ActionListener<SearchResponse> listener;
        private final ParsedScrollId scrollId;
        private final DiscoveryNodes nodes;

        private volatile AtomicArray<ShardSearchFailure> shardFailures;
        private final AtomicArray<QueryFetchSearchResult> queryFetchResults;

        private final AtomicInteger successfulOps;
        private final AtomicInteger counter;

        private AsyncAction(SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
            this.request = request;
            this.listener = listener;
            this.scrollId = scrollId;
            this.nodes = clusterService.state().nodes();
            this.successfulOps = new AtomicInteger(scrollId.getContext().length);
            this.counter = new AtomicInteger(scrollId.getContext().length);

            this.queryFetchResults = new AtomicArray<>(scrollId.getContext().length);
        }

        protected final ShardSearchFailure[] buildShardFailures() {
            if (shardFailures == null) {
                return ShardSearchFailure.EMPTY_ARRAY;
            }
            List<AtomicArray.Entry<ShardSearchFailure>> entries = shardFailures.asList();
            ShardSearchFailure[] failures = new ShardSearchFailure[entries.size()];
            for (int i = 0; i < failures.length; i++) {
                failures[i] = entries.get(i).value;
            }
            return failures;
        }

        // we do our best to return the shard failures, but its ok if its not fully concurrently safe
        // we simply try and return as much as possible
        protected final void addShardFailure(final int shardIndex, ShardSearchFailure failure) {
            if (shardFailures == null) {
                shardFailures = new AtomicArray<>(scrollId.getContext().length);
            }
            shardFailures.set(shardIndex, failure);
        }

        public void start() {
            if (scrollId.getContext().length == 0) {
                listener.onFailure(new SearchPhaseExecutionException("query", "no nodes to search on", ShardSearchFailure.EMPTY_ARRAY));
                return;
            }

            ScrollIdForNode[] context = scrollId.getContext();
            for (int i = 0; i < context.length; i++) {
                ScrollIdForNode target = context[i];
                DiscoveryNode node = nodes.get(target.getNode());
                if (node != null) {
                    executePhase(i, node, target.getScrollId());
                } else {
                    if (logger.isDebugEnabled()) {
                        logger.debug("Node [" + target.getNode() + "] not available for scroll request [" + scrollId.getSource() + "]");
                    }
                    successfulOps.decrementAndGet();
                    if (counter.decrementAndGet() == 0) {
                        finishHim();
                    }
                }
            }

            for (ScrollIdForNode target : scrollId.getContext()) {
                DiscoveryNode node = nodes.get(target.getNode());
                if (node == null) {
                    if (logger.isDebugEnabled()) {
                        logger.debug("Node [" + target.getNode() + "] not available for scroll request [" + scrollId.getSource() + "]");
                    }
                    successfulOps.decrementAndGet();
                    if (counter.decrementAndGet() == 0) {
                        finishHim();
                    }
                }
            }
        }

        void executePhase(final int shardIndex, DiscoveryNode node, final long searchId) {
            InternalScrollSearchRequest internalRequest = internalScrollSearchRequest(searchId, request);
            searchService.sendExecuteFetch(node, internalRequest, new ActionListener<ScrollQueryFetchSearchResult>() {
                @Override
                public void onResponse(ScrollQueryFetchSearchResult result) {
                    queryFetchResults.set(shardIndex, result.result());
                    if (counter.decrementAndGet() == 0) {
                        finishHim();
                    }
                }

                @Override
                public void onFailure(Throwable t) {
                    onPhaseFailure(t, searchId, shardIndex);
                }
            });
        }

        private void onPhaseFailure(Throwable t, long searchId, int shardIndex) {
            if (logger.isDebugEnabled()) {
                logger.debug("[{}] Failed to execute query phase", t, searchId);
            }
            addShardFailure(shardIndex, new ShardSearchFailure(t));
            successfulOps.decrementAndGet();
            if (counter.decrementAndGet() == 0) {
                if (successfulOps.get() == 0) {
                    listener.onFailure(new SearchPhaseExecutionException("query_fetch", "all shards failed", t, buildShardFailures()));
                } else {
                    finishHim();
                }
            }
        }

        private void finishHim() {
            try {
                innerFinishHim();
            } catch (Throwable e) {
                listener.onFailure(new ReduceSearchPhaseException("fetch", "", e, buildShardFailures()));
            }
        }

        private void innerFinishHim() throws Exception {
            ScoreDoc[] sortedShardList = searchPhaseController.sortDocs(true, queryFetchResults);
            final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults,
                queryFetchResults);
            String scrollId = null;
            if (request.scroll() != null) {
                scrollId = request.scrollId();
            }
            listener.onResponse(new SearchResponse(internalResponse, scrollId, this.scrollId.getContext().length, successfulOps.get(),
                buildTookInMillis(), buildShardFailures()));
        }
    }
}
@ -1,255 +0,0 @@
package org.elasticsearch.action.search.type;

import com.carrotsearch.hppc.IntArrayList;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.search.ReduceSearchPhaseException;
import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchServiceTransportAction;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.FetchSearchResult;
import org.elasticsearch.search.fetch.ShardFetchRequest;
import org.elasticsearch.search.internal.InternalScrollSearchRequest;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.query.ScrollQuerySearchResult;

import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

import static org.elasticsearch.action.search.type.TransportSearchHelper.internalScrollSearchRequest;

/**
 *
 */
public class TransportSearchScrollQueryThenFetchAction extends AbstractComponent {

    private final ClusterService clusterService;
    private final SearchServiceTransportAction searchService;
    private final SearchPhaseController searchPhaseController;

    @Inject
    public TransportSearchScrollQueryThenFetchAction(Settings settings, ClusterService clusterService,
                                                     SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController) {
        super(settings);
        this.clusterService = clusterService;
        this.searchService = searchService;
        this.searchPhaseController = searchPhaseController;
    }

    public void execute(SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
        new AsyncAction(request, scrollId, listener).start();
    }

    private class AsyncAction extends AbstractAsyncAction {

        private final SearchScrollRequest request;

        private final ActionListener<SearchResponse> listener;

        private final ParsedScrollId scrollId;

        private final DiscoveryNodes nodes;

        private volatile AtomicArray<ShardSearchFailure> shardFailures;
        final AtomicArray<QuerySearchResult> queryResults;
        final AtomicArray<FetchSearchResult> fetchResults;

        private volatile ScoreDoc[] sortedShardList;

        private final AtomicInteger successfulOps;

        private AsyncAction(SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
            this.request = request;
            this.listener = listener;
            this.scrollId = scrollId;
            this.nodes = clusterService.state().nodes();
            this.successfulOps = new AtomicInteger(scrollId.getContext().length);

            this.queryResults = new AtomicArray<>(scrollId.getContext().length);
            this.fetchResults = new AtomicArray<>(scrollId.getContext().length);
        }

        protected final ShardSearchFailure[] buildShardFailures() {
            if (shardFailures == null) {
                return ShardSearchFailure.EMPTY_ARRAY;
            }
            List<AtomicArray.Entry<ShardSearchFailure>> entries = shardFailures.asList();
            ShardSearchFailure[] failures = new ShardSearchFailure[entries.size()];
            for (int i = 0; i < failures.length; i++) {
                failures[i] = entries.get(i).value;
            }
            return failures;
        }

        // we do our best to return the shard failures, but its ok if its not fully concurrently safe
        // we simply try and return as much as possible
        protected final void addShardFailure(final int shardIndex, ShardSearchFailure failure) {
            if (shardFailures == null) {
                shardFailures = new AtomicArray<>(scrollId.getContext().length);
            }
            shardFailures.set(shardIndex, failure);
        }

        public void start() {
            if (scrollId.getContext().length == 0) {
                listener.onFailure(new SearchPhaseExecutionException("query", "no nodes to search on", ShardSearchFailure.EMPTY_ARRAY));
                return;
            }
            final AtomicInteger counter = new AtomicInteger(scrollId.getContext().length);

            ScrollIdForNode[] context = scrollId.getContext();
            for (int i = 0; i < context.length; i++) {
                ScrollIdForNode target = context[i];
                DiscoveryNode node = nodes.get(target.getNode());
                if (node != null) {
                    executeQueryPhase(i, counter, node, target.getScrollId());
                } else {
                    if (logger.isDebugEnabled()) {
                        logger.debug("Node [" + target.getNode() + "] not available for scroll request [" + scrollId.getSource() + "]");
                    }
                    successfulOps.decrementAndGet();
                    if (counter.decrementAndGet() == 0) {
                        try {
                            executeFetchPhase();
                        } catch (Throwable e) {
                            listener.onFailure(new SearchPhaseExecutionException("query", "Fetch failed", e, ShardSearchFailure.EMPTY_ARRAY));
                            return;
                        }
                    }
                }
            }
        }

        private void executeQueryPhase(final int shardIndex, final AtomicInteger counter, DiscoveryNode node, final long searchId) {
            InternalScrollSearchRequest internalRequest = internalScrollSearchRequest(searchId, request);
            searchService.sendExecuteQuery(node, internalRequest, new ActionListener<ScrollQuerySearchResult>() {
                @Override
                public void onResponse(ScrollQuerySearchResult result) {
                    queryResults.set(shardIndex, result.queryResult());
                    if (counter.decrementAndGet() == 0) {
                        try {
                            executeFetchPhase();
                        } catch (Throwable e) {
                            onFailure(e);
                        }
                    }
                }

                @Override
                public void onFailure(Throwable t) {
                    onQueryPhaseFailure(shardIndex, counter, searchId, t);
                }
            });
        }

        void onQueryPhaseFailure(final int shardIndex, final AtomicInteger counter, final long searchId, Throwable t) {
            if (logger.isDebugEnabled()) {
                logger.debug("[{}] Failed to execute query phase", t, searchId);
            }
            addShardFailure(shardIndex, new ShardSearchFailure(t));
            successfulOps.decrementAndGet();
            if (counter.decrementAndGet() == 0) {
                if (successfulOps.get() == 0) {
                    listener.onFailure(new SearchPhaseExecutionException("query", "all shards failed", t, buildShardFailures()));
                } else {
                    try {
                        executeFetchPhase();
                    } catch (Throwable e) {
                        listener.onFailure(new SearchPhaseExecutionException("query", "Fetch failed", e, ShardSearchFailure.EMPTY_ARRAY));
                    }
                }
            }
        }

        private void executeFetchPhase() throws Exception {
            sortedShardList = searchPhaseController.sortDocs(true, queryResults);
            AtomicArray<IntArrayList> docIdsToLoad = new AtomicArray<>(queryResults.length());
            searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardList);

            if (docIdsToLoad.asList().isEmpty()) {
                finishHim();
                return;
            }

            final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(sortedShardList, queryResults.length());
            final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size());
            for (final AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
                IntArrayList docIds = entry.value;
                final QuerySearchResult querySearchResult = queryResults.get(entry.index);
                ScoreDoc lastEmittedDoc = lastEmittedDocPerShard[entry.index];
                ShardFetchRequest shardFetchRequest = new ShardFetchRequest(querySearchResult.id(), docIds, lastEmittedDoc);
                DiscoveryNode node = nodes.get(querySearchResult.shardTarget().nodeId());
                searchService.sendExecuteFetchScroll(node, shardFetchRequest, new ActionListener<FetchSearchResult>() {
                    @Override
                    public void onResponse(FetchSearchResult result) {
                        result.shardTarget(querySearchResult.shardTarget());
                        fetchResults.set(entry.index, result);
                        if (counter.decrementAndGet() == 0) {
                            finishHim();
                        }
                    }

                    @Override
                    public void onFailure(Throwable t) {
                        if (logger.isDebugEnabled()) {
                            logger.debug("Failed to execute fetch phase", t);
                        }
                        successfulOps.decrementAndGet();
                        if (counter.decrementAndGet() == 0) {
                            finishHim();
                        }
                    }
                });
            }
        }

        private void finishHim() {
            try {
                innerFinishHim();
            } catch (Throwable e) {
                listener.onFailure(new ReduceSearchPhaseException("fetch", "", e, buildShardFailures()));
            }
        }

        private void innerFinishHim() {
            InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryResults, fetchResults);
            String scrollId = null;
            if (request.scroll() != null) {
                scrollId = request.scrollId();
            }
            listener.onResponse(new SearchResponse(internalResponse, scrollId, this.scrollId.getContext().length, successfulOps.get(),
                buildTookInMillis(), buildShardFailures()));
        }
    }
}
@ -1,406 +0,0 @@
package org.elasticsearch.action.search.type;

import com.carrotsearch.hppc.IntArrayList;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.NoShardAvailableActionException;
import org.elasticsearch.action.search.ReduceSearchPhaseException;
import org.elasticsearch.action.search.SearchAction;
import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.TransportAction;
import org.elasticsearch.action.support.TransportActions;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.action.SearchServiceTransportAction;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.query.QuerySearchResultProvider;
import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;

import static org.elasticsearch.action.search.type.TransportSearchHelper.internalSearchRequest;

/**
 *
 */
public abstract class TransportSearchTypeAction extends TransportAction<SearchRequest, SearchResponse> {

    protected final ClusterService clusterService;

    protected final SearchServiceTransportAction searchService;

    protected final SearchPhaseController searchPhaseController;

    public TransportSearchTypeAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
                                     SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController,
                                     ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, SearchAction.NAME, threadPool, actionFilters, indexNameExpressionResolver, clusterService.getTaskManager());
        this.clusterService = clusterService;
        this.searchService = searchService;
        this.searchPhaseController = searchPhaseController;
    }

    protected abstract class BaseAsyncAction<FirstResult extends SearchPhaseResult> extends AbstractAsyncAction {

        protected final ActionListener<SearchResponse> listener;

        protected final GroupShardsIterator shardsIts;

        protected final SearchRequest request;

        protected final ClusterState clusterState;
        protected final DiscoveryNodes nodes;

        protected final int expectedSuccessfulOps;
        private final int expectedTotalOps;

        protected final AtomicInteger successfulOps = new AtomicInteger();
        private final AtomicInteger totalOps = new AtomicInteger();

        protected final AtomicArray<FirstResult> firstResults;
        private volatile AtomicArray<ShardSearchFailure> shardFailures;
        private final Object shardFailuresMutex = new Object();
        protected volatile ScoreDoc[] sortedShardList;

        protected BaseAsyncAction(SearchRequest request, ActionListener<SearchResponse> listener) {
            this.request = request;
            this.listener = listener;

            this.clusterState = clusterService.state();
            nodes = clusterState.nodes();

            clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ);

            // TODO: I think startTime() should become part of ActionRequest and that should be used both for index name
            // date math expressions and $now in scripts. This way all apis will deal with now in the same way instead
            // of just for the _search api
            String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request.indicesOptions(), startTime(), request.indices());

            for (String index : concreteIndices) {
                clusterState.blocks().indexBlockedRaiseException(ClusterBlockLevel.READ, index);
            }

            Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, request.routing(), request.indices());

            shardsIts = clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, request.preference());
            expectedSuccessfulOps = shardsIts.size();
            // we need to add 1 for non active partition, since we count it in the total!
            expectedTotalOps = shardsIts.totalSizeWith1ForEmpty();

            firstResults = new AtomicArray<>(shardsIts.size());
        }

        public void start() {
            if (expectedSuccessfulOps == 0) {
                // no search shards to search on, bail with empty response (it happens with search across _all with no indices around and consistent with broadcast operations)
                listener.onResponse(new SearchResponse(InternalSearchResponse.empty(), null, 0, 0, buildTookInMillis(), ShardSearchFailure.EMPTY_ARRAY));
                return;
            }
            int shardIndex = -1;
            for (final ShardIterator shardIt : shardsIts) {
                shardIndex++;
                final ShardRouting shard = shardIt.nextOrNull();
                if (shard != null) {
                    performFirstPhase(shardIndex, shardIt, shard);
                } else {
                    // really, no shards active in this group
                    onFirstPhaseResult(shardIndex, null, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
                }
            }
        }

        void performFirstPhase(final int shardIndex, final ShardIterator shardIt, final ShardRouting shard) {
            if (shard == null) {
                // no more active shards... (we should not really get here, but just for safety)
                onFirstPhaseResult(shardIndex, null, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
            } else {
                final DiscoveryNode node = nodes.get(shard.currentNodeId());
                if (node == null) {
                    onFirstPhaseResult(shardIndex, shard, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
                } else {
                    String[] filteringAliases = indexNameExpressionResolver.filteringAliases(clusterState, shard.index().getName(), request.indices());
                    sendExecuteFirstPhase(node, internalSearchRequest(shard, shardsIts.size(), request, filteringAliases, startTime()), new ActionListener<FirstResult>() {
                        @Override
                        public void onResponse(FirstResult result) {
                            onFirstPhaseResult(shardIndex, shard, result, shardIt);
                        }

                        @Override
                        public void onFailure(Throwable t) {
                            onFirstPhaseResult(shardIndex, shard, node.id(), shardIt, t);
                        }
                    });
                }
            }
        }

        void onFirstPhaseResult(int shardIndex, ShardRouting shard, FirstResult result, ShardIterator shardIt) {
            result.shardTarget(new SearchShardTarget(shard.currentNodeId(), shard.index(), shard.id()));
            processFirstPhaseResult(shardIndex, result);
            // we need to increment successful ops first before we compare the exit condition otherwise if we
            // are fast we could concurrently update totalOps but then preempt one of the threads which can
            // cause the successor to read a wrong value from successfulOps if second phase is very fast ie. count etc.
            successfulOps.incrementAndGet();
            // increment all the "future" shards to update the total ops since we some may work and some may not...
            // and when that happens, we break on total ops, so we must maintain them
            final int xTotalOps = totalOps.addAndGet(shardIt.remaining() + 1);
            if (xTotalOps == expectedTotalOps) {
                try {
                    innerMoveToSecondPhase();
                } catch (Throwable e) {
                    if (logger.isDebugEnabled()) {
                        logger.debug(shardIt.shardId() + ": Failed to execute [" + request + "] while moving to second phase", e);
                    }
                    raiseEarlyFailure(new ReduceSearchPhaseException(firstPhaseName(), "", e, buildShardFailures()));
                }
            } else if (xTotalOps > expectedTotalOps) {
                raiseEarlyFailure(new IllegalStateException("unexpected higher total ops [" + xTotalOps + "] compared to expected [" + expectedTotalOps + "]"));
            }
        }

        void onFirstPhaseResult(final int shardIndex, @Nullable ShardRouting shard, @Nullable String nodeId, final ShardIterator shardIt, Throwable t) {
            // we always add the shard failure for a specific shard instance
            // we do make sure to clean it on a successful response from a shard
            SearchShardTarget shardTarget = new SearchShardTarget(nodeId, shardIt.shardId().getIndex(), shardIt.shardId().getId());
            addShardFailure(shardIndex, shardTarget, t);

            if (totalOps.incrementAndGet() == expectedTotalOps) {
                if (logger.isDebugEnabled()) {
                    if (t != null && !TransportActions.isShardNotAvailableException(t)) {
                        if (shard != null) {
                            logger.debug(shard.shortSummary() + ": Failed to execute [" + request + "]", t);
                        } else {
                            logger.debug(shardIt.shardId() + ": Failed to execute [" + request + "]", t);
                        }
                    } else if (logger.isTraceEnabled()) {
                        logger.trace("{}: Failed to execute [{}]", t, shard, request);
                    }
                }
                final ShardSearchFailure[] shardSearchFailures = buildShardFailures();
                if (successfulOps.get() == 0) {
                    if (logger.isDebugEnabled()) {
                        logger.debug("All shards failed for phase: [{}]", t, firstPhaseName());
                    }

                    // no successful ops, raise an exception
                    raiseEarlyFailure(new SearchPhaseExecutionException(firstPhaseName(), "all shards failed", t, shardSearchFailures));
                } else {
                    try {
                        innerMoveToSecondPhase();
                    } catch (Throwable e) {
                        raiseEarlyFailure(new ReduceSearchPhaseException(firstPhaseName(), "", e, shardSearchFailures));
                    }
                }
            } else {
                final ShardRouting nextShard = shardIt.nextOrNull();
                final boolean lastShard = nextShard == null;
                // trace log this exception
                if (logger.isTraceEnabled()) {
                    logger.trace(executionFailureMsg(shard, shardIt, request, lastShard), t);
                }
                if (!lastShard) {
                    try {
                        performFirstPhase(shardIndex, shardIt, nextShard);
                    } catch (Throwable t1) {
                        onFirstPhaseResult(shardIndex, shard, shard.currentNodeId(), shardIt, t1);
                    }
                } else {
                    // no more shards active, add a failure
                    if (logger.isDebugEnabled() && !logger.isTraceEnabled()) { // do not double log this exception
                        if (t != null && !TransportActions.isShardNotAvailableException(t)) {
                            logger.debug(executionFailureMsg(shard, shardIt, request, lastShard), t);
                        }
                    }
                }
            }
        }

        private String executionFailureMsg(@Nullable ShardRouting shard, final ShardIterator shardIt, SearchRequest request, boolean lastShard) {
            if (shard != null) {
                return shard.shortSummary() + ": Failed to execute [" + request + "] lastShard [" + lastShard + "]";
            } else {
                return shardIt.shardId() + ": Failed to execute [" + request + "] lastShard [" + lastShard + "]";
            }
        }

        protected final ShardSearchFailure[] buildShardFailures() {
            AtomicArray<ShardSearchFailure> shardFailures = this.shardFailures;
            if (shardFailures == null) {
                return ShardSearchFailure.EMPTY_ARRAY;
            }
            List<AtomicArray.Entry<ShardSearchFailure>> entries = shardFailures.asList();
            ShardSearchFailure[] failures = new ShardSearchFailure[entries.size()];
            for (int i = 0; i < failures.length; i++) {
                failures[i] = entries.get(i).value;
            }
            return failures;
        }

        protected final void addShardFailure(final int shardIndex, @Nullable SearchShardTarget shardTarget, Throwable t) {
            // we don't aggregate shard failures on non active shards (but do keep the header counts right)
            if (TransportActions.isShardNotAvailableException(t)) {
                return;
            }

            // lazily create shard failures, so we can early build the empty shard failure list in most cases (no failures)
            if (shardFailures == null) {
                synchronized (shardFailuresMutex) {
                    if (shardFailures == null) {
                        shardFailures = new AtomicArray<>(shardsIts.size());
                    }
                }
            }
            ShardSearchFailure failure = shardFailures.get(shardIndex);
            if (failure == null) {
                shardFailures.set(shardIndex, new ShardSearchFailure(t, shardTarget));
            } else {
                // the failure is already present, try and not override it with an exception that is less meaningless
                // for example, getting illegal shard state
                if (TransportActions.isReadOverrideException(t)) {
                    shardFailures.set(shardIndex, new ShardSearchFailure(t, shardTarget));
                }
            }
        }

        private void raiseEarlyFailure(Throwable t) {
            for (AtomicArray.Entry<FirstResult> entry : firstResults.asList()) {
                try {
                    DiscoveryNode node = nodes.get(entry.value.shardTarget().nodeId());
                    sendReleaseSearchContext(entry.value.id(), node);
                } catch (Throwable t1) {
                    logger.trace("failed to release context", t1);
                }
            }
            listener.onFailure(t);
        }

        /**
         * Releases shard targets that are not used in the docsIdsToLoad.
         */
        protected void releaseIrrelevantSearchContexts(AtomicArray<? extends QuerySearchResultProvider> queryResults,
                                                       AtomicArray<IntArrayList> docIdsToLoad) {
            if (docIdsToLoad == null) {
                return;
            }
            // we only release search context that we did not fetch from if we are not scrolling
            if (request.scroll() == null) {
                for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults.asList()) {
                    final TopDocs topDocs = entry.value.queryResult().queryResult().topDocs();
                    if (topDocs != null && topDocs.scoreDocs.length > 0 // the shard had matches
                            && docIdsToLoad.get(entry.index) == null) { // but none of them made it to the global top docs
                        try {
                            DiscoveryNode node = nodes.get(entry.value.queryResult().shardTarget().nodeId());
                            sendReleaseSearchContext(entry.value.queryResult().id(), node);
                        } catch (Throwable t1) {
                            logger.trace("failed to release context", t1);
                        }
                    }
                }
            }
        }

        protected void sendReleaseSearchContext(long contextId, DiscoveryNode node) {
            if (node != null) {
                searchService.sendFreeContext(node, contextId, request);
            }
        }

        protected ShardFetchSearchRequest createFetchRequest(QuerySearchResult queryResult, AtomicArray.Entry<IntArrayList> entry, ScoreDoc[] lastEmittedDocPerShard) {
            if (lastEmittedDocPerShard != null) {
                ScoreDoc lastEmittedDoc = lastEmittedDocPerShard[entry.index];
                return new ShardFetchSearchRequest(request, queryResult.id(), entry.value, lastEmittedDoc);
            } else {
                return new ShardFetchSearchRequest(request, queryResult.id(), entry.value);
            }
        }

        protected abstract void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, ActionListener<FirstResult> listener);

        protected final void processFirstPhaseResult(int shardIndex, FirstResult result) {
            firstResults.set(shardIndex, result);

            if (logger.isTraceEnabled()) {
                logger.trace("got first-phase result from {}", result != null ? result.shardTarget() : null);
            }

            // clean a previous error on this shard group (note, this code will be serialized on the same shardIndex value level
            // so its ok concurrency wise to miss potentially the shard failures being created because of another failure
            // in the #addShardFailure, because by definition, it will happen on *another* shardIndex
            AtomicArray<ShardSearchFailure> shardFailures = this.shardFailures;
            if (shardFailures != null) {
                shardFailures.set(shardIndex, null);
            }
        }

        final void innerMoveToSecondPhase() throws Exception {
            if (logger.isTraceEnabled()) {
                StringBuilder sb = new StringBuilder();
                boolean hadOne = false;
                for (int i = 0; i < firstResults.length(); i++) {
                    FirstResult result = firstResults.get(i);
                    if (result == null) {
                        continue; // failure
                    }
                    if (hadOne) {
                        sb.append(",");
                    } else {
                        hadOne = true;
                    }
                    sb.append(result.shardTarget());
                }

                logger.trace("Moving to second phase, based on results from: {} (cluster state version: {})", sb, clusterState.version());
            }
            moveToSecondPhase();
        }

        protected abstract void moveToSecondPhase() throws Exception;

        protected abstract String firstPhaseName();
    }
}
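For reference, the deleted TransportSearchTypeAction above drives its first phase with a single totalOps counter: each shard group contributes its remaining copies plus one to expectedTotalOps, and the second phase starts exactly when the counter reaches that total, whether the individual operations succeeded or failed. Below is a minimal standalone sketch of that accounting pattern under simplified assumptions (plain ints stand in for shard groups; there is no real shard routing); it is an illustration, not the Elasticsearch class.

import java.util.concurrent.atomic.AtomicInteger;

public class TotalOpsSketch {
    public static void main(String[] args) {
        int[] replicasRemainingPerGroup = {2, 0, 1}; // hypothetical shard groups

        int expectedTotalOps = 0;
        for (int remaining : replicasRemainingPerGroup) {
            expectedTotalOps += remaining + 1; // +1 counts the copy we actually queried
        }

        AtomicInteger totalOps = new AtomicInteger();
        for (int remaining : replicasRemainingPerGroup) {
            // Each first-phase result adds the shard it ran on plus the copies we will
            // no longer try, so successes and failures advance the same counter.
            int ops = totalOps.addAndGet(remaining + 1);
            if (ops == expectedTotalOps) {
                System.out.println("all first-phase results in; move to second phase");
            }
        }
    }
}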
@ -30,6 +30,7 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskListener;
import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.threadpool.ThreadPool;
@ -72,6 +73,13 @@ public abstract class TransportAction<Request extends ActionRequest<Request>, Re
     * This is a typical behavior.
     */
    public final Task execute(Request request, ActionListener<Response> listener) {
        /*
         * While this version of execute could delegate to the TaskListener
         * version of execute that'd add yet another layer of wrapping on the
         * listener and prevent us from using the listener bare if there isn't a
         * task. That just seems like too many objects. Thus the two versions of
         * this method.
         */
        Task task = taskManager.register("transport", actionName, request);
        if (task == null) {
            execute(null, request, listener);
@ -93,11 +101,32 @@ public abstract class TransportAction<Request extends ActionRequest<Request>, Re
        return task;
    }

    public final Task execute(Request request, TaskListener<Response> listener) {
        Task task = taskManager.register("transport", actionName, request);
        execute(task, request, new ActionListener<Response>() {
            @Override
            public void onResponse(Response response) {
                if (task != null) {
                    taskManager.unregister(task);
                }
                listener.onResponse(task, response);
            }

            @Override
            public void onFailure(Throwable e) {
                if (task != null) {
                    taskManager.unregister(task);
                }
                listener.onFailure(task, e);
            }
        });
        return task;
    }

    /**
     * Use this method when the transport action should continue to run in the context of the current task
     */
    public final void execute(Task task, Request request, ActionListener<Response> listener) {

        ActionRequestValidationException validationException = request.validate();
        if (validationException != null) {
            listener.onFailure(validationException);
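For context on the TransportAction hunks above: the new execute overload threads the registered Task through a TaskListener, unregistering the task before the listener observes the outcome. The following standalone sketch mirrors that wrap-and-unregister pattern; Task, TaskListener, and TaskManager here are simplified stand-ins written for this example, not the Elasticsearch classes.

import java.util.concurrent.Callable;
import java.util.concurrent.atomic.AtomicLong;

// Simplified stand-ins for the task classes (assumption, illustration only).
class Task {
    final long id;
    Task(long id) { this.id = id; }
}

interface TaskListener<R> {
    void onResponse(Task task, R response);
    void onFailure(Task task, Throwable e);
}

class TaskManager {
    private final AtomicLong ids = new AtomicLong();
    Task register(String type, String action) { return new Task(ids.incrementAndGet()); }
    void unregister(Task task) { /* drop the task from the live-task registry */ }
}

public class TaskListenerSketch {
    private static final TaskManager taskManager = new TaskManager();

    // Mirrors the wrapping in the hunk: register a task, run the work, and always
    // unregister before handing the outcome (plus the task) to the listener.
    static <R> Task execute(String action, Callable<R> work, TaskListener<R> listener) {
        Task task = taskManager.register("transport", action);
        try {
            R response = work.call();
            taskManager.unregister(task);
            listener.onResponse(task, response);
        } catch (Exception e) {
            taskManager.unregister(task);
            listener.onFailure(task, e);
        }
        return task;
    }

    public static void main(String[] args) {
        execute("indices:data/read/search", () -> "ok", new TaskListener<String>() {
            @Override
            public void onResponse(Task task, String response) {
                System.out.println("task " + task.id + " -> " + response);
            }

            @Override
            public void onFailure(Task task, Throwable e) {
                System.err.println("task " + task.id + " failed: " + e);
            }
        });
    }
}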
@ -110,7 +110,7 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
    }


    private class AsyncAction {
    class AsyncAction {

        private final NodesRequest request;
        private final String[] nodesIds;

@ -120,7 +120,7 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
        private final AtomicInteger counter = new AtomicInteger();
        private final Task task;

        private AsyncAction(Task task, NodesRequest request, ActionListener<NodesResponse> listener) {
        AsyncAction(Task task, NodesRequest request, ActionListener<NodesResponse> listener) {
            this.task = task;
            this.request = request;
            this.listener = listener;

@ -135,7 +135,7 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
            this.responses = new AtomicReferenceArray<>(this.nodesIds.length);
        }

        private void start() {
        void start() {
            if (nodesIds.length == 0) {
                // nothing to notify
                threadPool.generic().execute(new Runnable() {

@ -158,11 +158,6 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
                try {
                    if (node == null) {
                        onFailure(idx, nodeId, new NoSuchNodeException(nodeId));
                    } else if (!clusterService.localNode().shouldConnectTo(node) && !clusterService.localNode().equals(node)) {
                        // the check "!clusterService.localNode().equals(node)" is to maintain backward comp. where before
                        // we allowed to connect from "local" client node to itself, certain tests rely on it, if we remove it, we need to fix
                        // those (and they randomize the client node usage, so tricky to find when)
                        onFailure(idx, nodeId, new NodeShouldNotConnectException(clusterService.localNode(), node));
                    } else {
                        ChildTaskRequest nodeRequest = newNodeRequest(nodeId, request);
                        if (task != null) {
@ -35,10 +35,8 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.LoggerMessageFormat;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.node.NodeClosedException;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.BaseTransportResponseHandler;
@ -91,11 +89,11 @@ public abstract class TransportInstanceSingleOperationAction<Request extends Ins
    protected ClusterBlockException checkRequestBlock(ClusterState state, Request request) {
        return state.blocks().indexBlockedException(ClusterBlockLevel.WRITE, request.concreteIndex());
    }

    /**
     * Resolves the request. If the resolve means a different execution, then return false
     * here to indicate not to continue and execute this request.
     * Resolves the request. Throws an exception if the request cannot be resolved.
     */
    protected abstract boolean resolveRequest(ClusterState state, Request request, ActionListener<Response> listener);
    protected abstract void resolveRequest(ClusterState state, Request request);

    protected boolean retryOnFailure(Throwable e) {
        return false;

@ -141,11 +139,7 @@ public abstract class TransportInstanceSingleOperationAction<Request extends Ins
            }
        }
        request.concreteIndex(indexNameExpressionResolver.concreteSingleIndex(observer.observedState(), request));
        // check if we need to execute, and if not, return
        if (!resolveRequest(observer.observedState(), request, listener)) {
            listener.onFailure(new IllegalStateException(LoggerMessageFormat.format("[{}][{}] request {} could not be resolved", request.index, request.shardId, actionName)));
            return;
        }
        resolveRequest(observer.observedState(), request);
        blockException = checkRequestBlock(observer.observedState(), request);
        if (blockException != null) {
            if (blockException.retryable()) {
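The TransportInstanceSingleOperationAction change above replaces the boolean resolveRequest contract (return false to stop) with a void method that throws when resolution fails, so a caller can no longer forget to check the result. A minimal sketch of the two styles, using a hypothetical request type invented for this example:

// Hypothetical request type used only for this sketch.
class SketchRequest {
    String routing;
}

public class ResolveContractSketch {

    // Old style (assumed shape): a boolean return the caller had to remember to check.
    static boolean resolveRequestOld(SketchRequest request) {
        return request.routing != null;
    }

    // New style, as in the hunk: resolution either succeeds or throws, so a missing
    // check cannot silently continue with an unresolved request.
    static void resolveRequestNew(SketchRequest request) {
        if (request.routing == null) {
            throw new IllegalStateException("request could not be resolved");
        }
    }

    public static void main(String[] args) {
        SketchRequest request = new SketchRequest();
        request.routing = "user-routing";
        resolveRequestNew(request); // passes; a null routing would throw instead
        System.out.println("resolved");
    }
}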
@ -71,7 +71,7 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
     * Sets the list of action masks for the actions that should be returned
     */
    @SuppressWarnings("unchecked")
    public final Request actions(String... actions) {
    public final Request setActions(String... actions) {
        this.actions = actions;
        return (Request) this;
    }

@ -79,16 +79,16 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
    /**
     * Return the list of action masks for the actions that should be returned
     */
    public String[] actions() {
    public String[] getActions() {
        return actions;
    }

    public final String[] nodesIds() {
    public final String[] getNodesIds() {
        return nodesIds;
    }

    @SuppressWarnings("unchecked")
    public final Request nodesIds(String... nodesIds) {
    public final Request setNodesIds(String... nodesIds) {
        this.nodesIds = nodesIds;
        return (Request) this;
    }

@ -98,12 +98,12 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
     *
     * By default tasks with any ids are returned.
     */
    public TaskId taskId() {
    public TaskId getTaskId() {
        return taskId;
    }

    @SuppressWarnings("unchecked")
    public final Request taskId(TaskId taskId) {
    public final Request setTaskId(TaskId taskId) {
        this.taskId = taskId;
        return (Request) this;
    }

@ -112,29 +112,29 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
    /**
     * Returns the parent task id that tasks should be filtered by
     */
    public TaskId parentTaskId() {
    public TaskId getParentTaskId() {
        return parentTaskId;
    }

    @SuppressWarnings("unchecked")
    public Request parentTaskId(TaskId parentTaskId) {
    public Request setParentTaskId(TaskId parentTaskId) {
        this.parentTaskId = parentTaskId;
        return (Request) this;
    }


    public TimeValue timeout() {
    public TimeValue getTimeout() {
        return this.timeout;
    }

    @SuppressWarnings("unchecked")
    public final Request timeout(TimeValue timeout) {
    public final Request setTimeout(TimeValue timeout) {
        this.timeout = timeout;
        return (Request) this;
    }

    @SuppressWarnings("unchecked")
    public final Request timeout(String timeout) {
    public final Request setTimeout(String timeout) {
        this.timeout = TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".timeout");
        return (Request) this;
    }

@ -162,11 +162,11 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
    }

    public boolean match(Task task) {
        if (actions() != null && actions().length > 0 && Regex.simpleMatch(actions(), task.getAction()) == false) {
        if (getActions() != null && getActions().length > 0 && Regex.simpleMatch(getActions(), task.getAction()) == false) {
            return false;
        }
        if (taskId().isSet() == false) {
            if (taskId().getId() != task.getId()) {
        if (getTaskId().isSet() == false) {
            if (getTaskId().getId() != task.getId()) {
                return false;
            }
        }
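The BaseTasksRequest hunks above and the TasksRequestBuilder hunk below rename the fluent accessors to conventional get/set pairs while keeping the chainable return-this style. A compact stand-in sketch of the resulting API shape (a simplified class written for this example, not the real request):

// Stand-in showing the renamed get/set fluent style (illustration only).
public class TasksRequestSketch {
    private String[] actions = new String[0];
    private String timeout = null;

    public TasksRequestSketch setActions(String... actions) {
        this.actions = actions;
        return this; // fluent, like the real setters returning (Request) this
    }

    public String[] getActions() {
        return actions;
    }

    public TasksRequestSketch setTimeout(String timeout) {
        this.timeout = timeout;
        return this;
    }

    public String getTimeout() {
        return timeout;
    }

    public static void main(String[] args) {
        TasksRequestSketch request = new TasksRequestSketch()
                .setActions("cluster:monitor/*")
                .setTimeout("10s");
        System.out.println(String.join(",", request.getActions()) + " in " + request.getTimeout());
    }
}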
@ -35,19 +35,19 @@ public class TasksRequestBuilder <Request extends BaseTasksRequest<Request>, Res

    @SuppressWarnings("unchecked")
    public final RequestBuilder setNodesIds(String... nodesIds) {
        request.nodesIds(nodesIds);
        request.setNodesIds(nodesIds);
        return (RequestBuilder) this;
    }

    @SuppressWarnings("unchecked")
    public final RequestBuilder setActions(String... actions) {
        request.actions(actions);
        request.setActions(actions);
        return (RequestBuilder) this;
    }

    @SuppressWarnings("unchecked")
    public final RequestBuilder setTimeout(TimeValue timeout) {
        request.timeout(timeout);
        request.setTimeout(timeout);
        return (RequestBuilder) this;
    }
}
@ -124,25 +124,25 @@ public abstract class TransportTasksAction<
|
|||
}
|
||||
|
||||
protected String[] resolveNodes(TasksRequest request, ClusterState clusterState) {
|
||||
if (request.taskId().isSet()) {
|
||||
return clusterState.nodes().resolveNodesIds(request.nodesIds());
|
||||
if (request.getTaskId().isSet()) {
|
||||
return clusterState.nodes().resolveNodesIds(request.getNodesIds());
|
||||
} else {
|
||||
return new String[]{request.taskId().getNodeId()};
|
||||
return new String[]{request.getTaskId().getNodeId()};
|
||||
}
|
||||
}
|
||||
|
||||
protected void processTasks(TasksRequest request, Consumer<OperationTask> operation) {
|
||||
if (request.taskId().isSet() == false) {
|
||||
if (request.getTaskId().isSet() == false) {
|
||||
// we are only checking one task, we can optimize it
|
||||
Task task = taskManager.getTask(request.taskId().getId());
|
||||
Task task = taskManager.getTask(request.getTaskId().getId());
|
||||
if (task != null) {
|
||||
if (request.match(task)) {
|
||||
operation.accept((OperationTask) task);
|
||||
} else {
|
||||
throw new ResourceNotFoundException("task [{}] doesn't support this operation", request.taskId());
|
||||
throw new ResourceNotFoundException("task [{}] doesn't support this operation", request.getTaskId());
|
||||
}
|
||||
} else {
|
||||
throw new ResourceNotFoundException("task [{}] is missing", request.taskId());
|
||||
throw new ResourceNotFoundException("task [{}] is missing", request.getTaskId());
|
||||
}
|
||||
} else {
|
||||
for (Task task : taskManager.getTasks().values()) {
|
||||
|
@@ -224,8 +224,8 @@ public abstract class TransportTasksAction<
}
} else {
TransportRequestOptions.Builder builder = TransportRequestOptions.builder();
if (request.timeout() != null) {
builder.withTimeout(request.timeout());
if (request.getTimeout() != null) {
builder.withTimeout(request.getTimeout());
}
builder.withCompress(transportCompress());
for (int i = 0; i < nodesIds.length; i++) {
@@ -235,12 +235,6 @@ public abstract class TransportTasksAction<
try {
if (node == null) {
onFailure(idx, nodeId, new NoSuchNodeException(nodeId));
} else if (!clusterService.localNode().shouldConnectTo(node) && !clusterService.localNode().equals(node)) {
// the check "!clusterService.localNode().equals(node)" is to maintain backward comp. where before
// we allowed to connect from "local" client node to itself, certain tests rely on it, if we remove it, we
// need to fix
// those (and they randomize the client node usage, so tricky to find when)
onFailure(idx, nodeId, new NodeShouldNotConnectException(clusterService.localNode(), node));
} else {
NodeTaskRequest nodeRequest = new NodeTaskRequest(request);
nodeRequest.setParentTask(clusterService.localNode().id(), task.getId());

@@ -38,6 +38,7 @@ import org.elasticsearch.action.support.single.instance.TransportInstanceSingleO
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.routing.PlainShardIterator;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
@@ -99,13 +100,16 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
}

@Override
protected boolean resolveRequest(ClusterState state, UpdateRequest request, ActionListener<UpdateResponse> listener) {
request.routing((state.metaData().resolveIndexRouting(request.parent(), request.routing(), request.index())));
protected void resolveRequest(ClusterState state, UpdateRequest request) {
resolveAndValidateRouting(state.metaData(), request.concreteIndex(), request);
}

public static void resolveAndValidateRouting(MetaData metaData, String concreteIndex, UpdateRequest request) {
request.routing((metaData.resolveIndexRouting(request.parent(), request.routing(), request.index())));
// Fail fast on the node that received the request, rather than failing when translating on the index or delete request.
if (request.routing() == null && state.getMetaData().routingRequired(request.concreteIndex(), request.type())) {
throw new RoutingMissingException(request.concreteIndex(), request.type(), request.id());
if (request.routing() == null && metaData.routingRequired(concreteIndex, request.type())) {
throw new RoutingMissingException(concreteIndex, request.type(), request.id());
}
return true;
}

@Override
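
Pulling the routing resolution into the static resolveAndValidateRouting(MetaData, String, UpdateRequest) helper lets other callers reuse the same fail-fast validation without going through the transport action. A hedged sketch of such a caller; the surrounding method is hypothetical:

    // Illustrative reuse of the extracted helper, e.g. while translating an
    // update into the index/delete operations it expands to elsewhere.
    void prepareUpdate(ClusterState state, String concreteIndex, UpdateRequest request) {
        // throws RoutingMissingException early if routing is required but absent
        TransportUpdateAction.resolveAndValidateRouting(state.metaData(), concreteIndex, request);
    }
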
@@ -32,8 +32,6 @@ import org.elasticsearch.common.inject.CreationException;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.LogConfigurator;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.monitor.jvm.JvmInfo;

@@ -41,17 +39,12 @@ import org.elasticsearch.monitor.os.OsProbe;
import org.elasticsearch.monitor.process.ProcessProbe;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import org.elasticsearch.transport.TransportSettings;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Locale;
import java.util.Set;
import java.util.concurrent.CountDownLatch;

import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
@@ -141,6 +134,8 @@ final class Bootstrap {
// we've already logged this.
}

JNANatives.trySetMaxNumberOfThreads();

// init lucene random seed. it will use /dev/urandom where available:
StringHelper.randomId();
}
@@ -188,26 +183,12 @@ final class Bootstrap {
.put(settings)
.put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING.getKey(), true)
.build();
enforceOrLogLimits(nodeSettings);

BootstrapCheck.check(nodeSettings);

node = new Node(nodeSettings);
}

@SuppressForbidden(reason = "Exception#printStackTrace()")
private static void setupLogging(Settings settings) {
try {
Class.forName("org.apache.log4j.Logger");
LogConfigurator.configure(settings, true);
} catch (ClassNotFoundException e) {
// no log4j
} catch (NoClassDefFoundError e) {
// no log4j
} catch (Exception e) {
sysError("Failed to configure logging...", false);
e.printStackTrace();
}
}

private static Environment initialSettings(boolean foreground) {
Terminal terminal = foreground ? Terminal.DEFAULT : null;
return InternalSettingsPreparer.prepareEnvironment(EMPTY_SETTINGS, terminal);
@@ -254,7 +235,7 @@ final class Bootstrap {

Environment environment = initialSettings(foreground);
Settings settings = environment.settings();
setupLogging(settings);
LogConfigurator.configure(settings, true);
checkForCustomConfFile();

if (environment.pidFile() != null) {
@@ -363,47 +344,4 @@ final class Bootstrap {
}
}

static final Set<Setting> ENFORCE_SETTINGS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(
TransportSettings.BIND_HOST,
TransportSettings.HOST,
TransportSettings.PUBLISH_HOST,
NetworkService.GLOBAL_NETWORK_HOST_SETTING,
NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING,
NetworkService.GLOBAL_NETWORK_PUBLISHHOST_SETTING
)));

private static boolean enforceLimits(Settings settings) {
for (Setting setting : ENFORCE_SETTINGS) {
if (setting.exists(settings)) {
return true;
}
}
return false;
}

static void enforceOrLogLimits(Settings settings) { // pkg private for testing
/* We enforce limits once any network host is configured. In this case we assume the node is running in production
 * and all production limit checks must pass. This should be extended as we go to settings like:
 * - discovery.zen.minimum_master_nodes
 * - discovery.zen.ping.unicast.hosts is set if we use zen disco
 * - ensure we can write in all data directories
 * - fail if mlockall failed and was configured
 * - fail if vm.max_map_count is under a certain limit (not sure if this works cross platform)
 * - fail if the default cluster.name is used, if this is setup on network a real clustername should be used?*/
final boolean enforceLimits = enforceLimits(settings);
final ESLogger logger = Loggers.getLogger(Bootstrap.class);
final long maxFileDescriptorCount = ProcessProbe.getInstance().getMaxFileDescriptorCount();
if (maxFileDescriptorCount != -1) {
final int fileDescriptorCountThreshold = (1 << 16);
if (maxFileDescriptorCount < fileDescriptorCountThreshold) {
if (enforceLimits) {
throw new IllegalStateException("max file descriptors [" + maxFileDescriptorCount
+ "] for elasticsearch process likely too low, increase it to at least [" + fileDescriptorCountThreshold + "]");
}
logger.warn(
"max file descriptors [{}] for elasticsearch process likely too low, consider increasing to at least [{}]",
maxFileDescriptorCount, fileDescriptorCountThreshold);
}
}
}
}
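
Net effect of these Bootstrap changes: the inline enforceOrLogLimits logic moves into the dedicated BootstrapCheck class added below, and logging setup collapses to a direct LogConfigurator.configure call. A hedged sketch of the new enforcement trigger, assuming TransportSettings.HOST is backed by the "transport.host" key:

    // Illustrative only: once any network host setting is present, a failing
    // bootstrap check aborts startup instead of merely logging a warning.
    Settings production = Settings.builder().put("transport.host", "0.0.0.0").build();
    BootstrapCheck.check(production); // throws RuntimeException on a failed check
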
@@ -26,8 +26,8 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.cli.CliTool;
import org.elasticsearch.common.cli.CliToolConfig;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.cli.UserError;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.monitor.jvm.JvmInfo;

@@ -37,7 +37,6 @@ import java.util.Iterator;
import java.util.Locale;
import java.util.Map;
import java.util.Properties;
import java.util.Set;

import static org.elasticsearch.common.cli.CliToolConfig.Builder.cmd;
import static org.elasticsearch.common.cli.CliToolConfig.Builder.optionBuilder;
@@ -0,0 +1,252 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.bootstrap;

import org.apache.lucene.util.Constants;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.monitor.process.ProcessProbe;
import org.elasticsearch.transport.TransportSettings;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Set;

/**
 * We enforce limits once any network host is configured. In this case we assume the node is running in production
 * and all production limit checks must pass. This should be extended as we go to settings like:
 * - discovery.zen.minimum_master_nodes
 * - discovery.zen.ping.unicast.hosts is set if we use zen disco
 * - ensure we can write in all data directories
 * - fail if vm.max_map_count is under a certain limit (not sure if this works cross platform)
 * - fail if the default cluster.name is used, if this is setup on network a real clustername should be used?
 */
final class BootstrapCheck {

private BootstrapCheck() {
}

/**
 * checks the current limits against the snapshot or release build
 * checks
 *
 * @param settings the current node settings
 */
public static void check(final Settings settings) {
check(enforceLimits(settings), checks(settings));
}

/**
 * executes the provided checks and fails the node if
 * enforceLimits is true, otherwise logs warnings
 *
 * @param enforceLimits true if the checks should be enforced or
 *                      warned
 * @param checks        the checks to execute
 */
// visible for testing
static void check(final boolean enforceLimits, final List<Check> checks) {
final ESLogger logger = Loggers.getLogger(BootstrapCheck.class);

for (final Check check : checks) {
final boolean fail = check.check();
if (fail) {
if (enforceLimits) {
throw new RuntimeException(check.errorMessage());
} else {
logger.warn(check.errorMessage());
}
}
}
}

/**
 * The set of settings such that if any are set for the node, then
 * the checks are enforced
 *
 * @return the enforcement settings
 */
// visible for testing
static Set<Setting> enforceSettings() {
return Collections.unmodifiableSet(new HashSet<>(Arrays.asList(
TransportSettings.BIND_HOST,
TransportSettings.HOST,
TransportSettings.PUBLISH_HOST,
NetworkService.GLOBAL_NETWORK_HOST_SETTING,
NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING,
NetworkService.GLOBAL_NETWORK_PUBLISHHOST_SETTING
)));
}

/**
 * Tests if the checks should be enforced
 *
 * @param settings the current node settings
 * @return true if the checks should be enforced
 */
// visible for testing
static boolean enforceLimits(final Settings settings) {
return enforceSettings().stream().anyMatch(s -> s.exists(settings));
}

// the list of checks to execute
private static List<Check> checks(final Settings settings) {
final List<Check> checks = new ArrayList<>();
final FileDescriptorCheck fileDescriptorCheck
        = Constants.MAC_OS_X ? new OsXFileDescriptorCheck() : new FileDescriptorCheck();
checks.add(fileDescriptorCheck);
checks.add(new MlockallCheck(BootstrapSettings.MLOCKALL_SETTING.get(settings)));
if (Constants.LINUX) {
checks.add(new MaxNumberOfThreadsCheck());
}
return Collections.unmodifiableList(checks);
}

/**
 * Encapsulates a limit check
 */
interface Check {

/**
 * test if the node fails the check
 *
 * @return true if the node failed the check
 */
boolean check();

/**
 * the message for a failed check
 *
 * @return the error message on check failure
 */
String errorMessage();

}

static class OsXFileDescriptorCheck extends FileDescriptorCheck {

public OsXFileDescriptorCheck() {
// see constant OPEN_MAX defined in
// /usr/include/sys/syslimits.h on OS X and its use in JVM
// initialization in int os:init_2(void) defined in the JVM
// code for BSD (contains OS X)
super(10240);
}

}

// visible for testing
static class FileDescriptorCheck implements Check {

private final int limit;

FileDescriptorCheck() {
this(1 << 16);
}

protected FileDescriptorCheck(final int limit) {
if (limit <= 0) {
throw new IllegalArgumentException("limit must be positive but was [" + limit + "]");
}
this.limit = limit;
}

public final boolean check() {
final long maxFileDescriptorCount = getMaxFileDescriptorCount();
return maxFileDescriptorCount != -1 && maxFileDescriptorCount < limit;
}

@Override
public final String errorMessage() {
return String.format(
Locale.ROOT,
"max file descriptors [%d] for elasticsearch process likely too low, increase to at least [%d]",
getMaxFileDescriptorCount(),
limit
);
}

// visible for testing
long getMaxFileDescriptorCount() {
return ProcessProbe.getInstance().getMaxFileDescriptorCount();
}

}

// visible for testing
static class MlockallCheck implements Check {

private final boolean mlockallSet;

public MlockallCheck(final boolean mlockAllSet) {
this.mlockallSet = mlockAllSet;
}

@Override
public boolean check() {
return mlockallSet && !isMemoryLocked();
}

@Override
public String errorMessage() {
return "memory locking requested for elasticsearch process but memory is not locked";
}

// visible for testing
boolean isMemoryLocked() {
return Natives.isMemoryLocked();
}

}

static class MaxNumberOfThreadsCheck implements Check {

private final long maxNumberOfThreadsThreshold = 1 << 11;

@Override
public boolean check() {
return getMaxNumberOfThreads() != -1 && getMaxNumberOfThreads() < maxNumberOfThreadsThreshold;
}

@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"max number of threads [%d] for user [%s] likely too low, increase to at least [%d]",
getMaxNumberOfThreads(),
BootstrapInfo.getSystemProperties().get("user.name"),
maxNumberOfThreadsThreshold);
}

// visible for testing
long getMaxNumberOfThreads() {
return JNANatives.MAX_NUMBER_OF_THREADS;
}

}

}
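
The Check interface keeps each limit self-contained and testable through the "visible for testing" override points. A hedged sketch of how a further check could slot into checks(Settings); the class name, threshold, and probe are illustrative, not part of this commit:

    // Hypothetical extra check following the pattern above; getMaxMapCount()
    // is an assumed probe, not an API this diff provides.
    static class MaxMapCountCheck implements Check {

        private final long limit = 1 << 18; // illustrative threshold

        @Override
        public boolean check() {
            return getMaxMapCount() != -1 && getMaxMapCount() < limit;
        }

        @Override
        public String errorMessage() {
            return "max virtual memory areas vm.max_map_count [" + getMaxMapCount()
                    + "] likely too low, increase to at least [" + limit + "]";
        }

        // visible for testing
        long getMaxMapCount() {
            return -1; // a platform-specific probe would go here
        }

    }
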
@@ -48,6 +48,9 @@ class JNANatives {
// Set to true, in case policy can be applied to all threads of the process (even existing ones)
// otherwise they are only inherited for new threads (ES app threads)
static boolean LOCAL_SECCOMP_ALL = false;
// set to the maximum number of threads that can be created for
// the user ID that owns the running Elasticsearch process
static long MAX_NUMBER_OF_THREADS = -1;

static void tryMlockall() {
int errno = Integer.MIN_VALUE;
@@ -103,13 +106,29 @@ class JNANatives {
}
}

static void trySetMaxNumberOfThreads() {
if (Constants.LINUX) {
// this is only valid on Linux and the value *is* different on OS X
// see /usr/include/sys/resource.h on OS X
// on Linux the resource RLIMIT_NPROC means *the number of threads*
// this is in opposition to BSD-derived OSes
final int rlimit_nproc = 6;

final JNACLibrary.Rlimit rlimit = new JNACLibrary.Rlimit();
if (JNACLibrary.getrlimit(rlimit_nproc, rlimit) == 0) {
MAX_NUMBER_OF_THREADS = rlimit.rlim_cur.longValue();
} else {
logger.warn("unable to retrieve max number of threads [" + JNACLibrary.strerror(Native.getLastError()) + "]");
}
}
}

static String rlimitToString(long value) {
assert Constants.LINUX || Constants.MAC_OS_X;
if (value == JNACLibrary.RLIM_INFINITY) {
return "unlimited";
} else {
// TODO, on java 8 use Long.toUnsignedString, since that's what it is.
return Long.toString(value);
return Long.toUnsignedString(value);
}
}
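
MAX_NUMBER_OF_THREADS is populated once at startup and later read by MaxNumberOfThreadsCheck above; -1 stays in place when the limit could not be determined. A small sketch of consuming the value from within the same package (illustrative, not code from this commit):

    // Illustrative diagnostic; rlimitToString renders RLIM_INFINITY as "unlimited"
    // and -1 means the limit could not be read at startup.
    String describeMaxThreads() {
        long maxThreads = JNANatives.MAX_NUMBER_OF_THREADS;
        return maxThreads == -1 ? "unknown" : JNANatives.rlimitToString(maxThreads);
    }
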
@@ -25,12 +25,12 @@ import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Objects;

/**
 *
 * An event received by the local node, signaling that the cluster state has changed.
 */
public class ClusterChangedEvent {
@@ -43,6 +43,9 @@ public class ClusterChangedEvent {
private final DiscoveryNodes.Delta nodesDelta;

public ClusterChangedEvent(String source, ClusterState state, ClusterState previousState) {
Objects.requireNonNull(source, "source must not be null");
Objects.requireNonNull(state, "state must not be null");
Objects.requireNonNull(previousState, "previousState must not be null");
this.source = source;
this.state = state;
this.previousState = previousState;
@@ -56,19 +59,35 @@ public class ClusterChangedEvent {
return this.source;
}

/**
 * The new cluster state that caused this change event.
 */
public ClusterState state() {
return this.state;
}

/**
 * The previous cluster state for this change event.
 */
public ClusterState previousState() {
return this.previousState;
}

/**
 * Returns <code>true</code> iff the routing tables (for all indices) have
 * changed between the previous cluster state and the current cluster state.
 * Note that this is an object reference equality test, not an equals test.
 */
public boolean routingTableChanged() {
return state.routingTable() != previousState.routingTable();
}

/**
 * Returns <code>true</code> iff the routing table has changed for the given index.
 * Note that this is an object reference equality test, not an equals test.
 */
public boolean indexRoutingTableChanged(String index) {
Objects.requireNonNull(index, "index must not be null");
if (!state.routingTable().hasIndex(index) && !previousState.routingTable().hasIndex(index)) {
return false;
}
@@ -82,9 +101,6 @@ public class ClusterChangedEvent {
 * Returns the indices created in this event
 */
public List<String> indicesCreated() {
if (previousState == null) {
return Arrays.asList(state.metaData().indices().keys().toArray(String.class));
}
if (!metaDataChanged()) {
return Collections.emptyList();
}
@@ -105,20 +121,14 @@ public class ClusterChangedEvent {
 * Returns the indices deleted in this event
 */
public List<String> indicesDeleted() {

// if the new cluster state has a new master then we cannot know if an index which is not in the cluster state
// is actually supposed to be deleted or imported as dangling instead. for example a new master might not have
// the index in its cluster state because it was started with an empty data folder and in this case we want to
// import as dangling. we check here for new master too to be on the safe side in this case.
// This means that under certain conditions deleted indices might be reimported if a master fails while the deletion
// request is issued and a node receives the cluster state that would trigger the deletion from the new master.
// See test MetaDataWriteDataNodesTests.testIndicesDeleted()
// If the new cluster state has a new cluster UUID, the likely scenario is that a node was elected
// master that has had its data directory wiped out, in which case we don't want to delete the indices and lose data;
// rather we want to import them as dangling indices instead. So we check here if the cluster UUID differs from the previous
// cluster UUID, in which case, we don't want to delete indices that the master erroneously believes shouldn't exist.
// See test DiscoveryWithServiceDisruptionsIT.testIndicesDeleted()
// See discussion on https://github.com/elastic/elasticsearch/pull/9952 and
// https://github.com/elastic/elasticsearch/issues/11665
if (hasNewMaster() || previousState == null) {
return Collections.emptyList();
}
if (!metaDataChanged()) {
if (metaDataChanged() == false || isNewCluster()) {
return Collections.emptyList();
}
List<String> deleted = null;
@@ -134,10 +144,20 @@ public class ClusterChangedEvent {
return deleted == null ? Collections.<String>emptyList() : deleted;
}

/**
 * Returns <code>true</code> iff the metadata for the cluster has changed between
 * the previous cluster state and the new cluster state. Note that this is an object
 * reference equality test, not an equals test.
 */
public boolean metaDataChanged() {
return state.metaData() != previousState.metaData();
}

/**
 * Returns <code>true</code> iff the {@link IndexMetaData} for a given index
 * has changed between the previous cluster state and the new cluster state.
 * Note that this is an object reference equality test, not an equals test.
 */
public boolean indexMetaDataChanged(IndexMetaData current) {
MetaData previousMetaData = previousState.metaData();
if (previousMetaData == null) {
@@ -152,46 +172,56 @@ public class ClusterChangedEvent {
return true;
}

/**
 * Returns <code>true</code> iff the cluster level blocks have changed between cluster states.
 * Note that this is an object reference equality test, not an equals test.
 */
public boolean blocksChanged() {
return state.blocks() != previousState.blocks();
}

/**
 * Returns <code>true</code> iff the local node is the master node of the cluster.
 */
public boolean localNodeMaster() {
return state.nodes().localNodeMaster();
}

/**
 * Returns the {@link org.elasticsearch.cluster.node.DiscoveryNodes.Delta} between
 * the previous cluster state and the new cluster state.
 */
public DiscoveryNodes.Delta nodesDelta() {
return this.nodesDelta;
}

/**
 * Returns <code>true</code> iff nodes have been removed from the cluster since the last cluster state.
 */
public boolean nodesRemoved() {
return nodesDelta.removed();
}

/**
 * Returns <code>true</code> iff nodes have been added to the cluster since the last cluster state.
 */
public boolean nodesAdded() {
return nodesDelta.added();
}

/**
 * Returns <code>true</code> iff nodes have been changed (added or removed) from the cluster since the last cluster state.
 */
public boolean nodesChanged() {
return nodesRemoved() || nodesAdded();
}

/**
 * Checks if this cluster state comes from a different master than the previous one.
 * This is a workaround for the scenario where a node misses a cluster state that has either
 * no master block or state not recovered flag set. In this case we must make sure that
 * an index missing from the cluster state is not deleted immediately but instead imported
 * as dangling. See discussion on https://github.com/elastic/elasticsearch/pull/9952
 */
private boolean hasNewMaster() {
String oldMaster = previousState().getNodes().masterNodeId();
String newMaster = state().getNodes().masterNodeId();
if (oldMaster == null && newMaster == null) {
return false;
}
if (oldMaster == null && newMaster != null) {
return true;
}
return oldMaster.equals(newMaster) == false;
// Determines whether or not the current cluster state represents an entirely
// different cluster from the previous cluster state, which will happen when a
// master node is elected that has never been part of the cluster before.
private boolean isNewCluster() {
final String prevClusterUUID = previousState.metaData().clusterUUID();
final String currClusterUUID = state.metaData().clusterUUID();
return prevClusterUUID.equals(currClusterUUID) == false;
}
}
}
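
For orientation, a minimal sketch of a consumer of these accessors; the lambda-style listener registration is illustrative, but the event methods are the ones defined above:

    // Hypothetical ClusterStateListener wiring; the reference-equality checks
    // above make these predicates cheap on no-op state updates.
    clusterService.add(event -> {
        if (event.metaDataChanged()) {
            for (String index : event.indicesDeleted()) {
                // tear down local resources for the deleted index
            }
        }
        if (event.nodesChanged()) {
            // react to nodes joining or leaving
        }
    });
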
@@ -56,6 +56,11 @@ public interface ClusterService extends LifecycleComponent<ClusterService> {
 */
void removeInitialStateBlock(ClusterBlock block) throws IllegalStateException;

/**
 * Remove an initial block that would have been set on the first cluster state created.
 */
void removeInitialStateBlock(int blockId) throws IllegalStateException;

/**
 * The operation routing.
 */
@@ -51,7 +51,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoveryService;
import org.elasticsearch.discovery.local.LocalDiscovery;
import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction;

@@ -70,7 +69,7 @@ import java.util.Set;
 * and cluster state {@link #status}, which is updated during cluster state publishing and applying
 * processing. The cluster state can be updated only on the master node. All updates are performed on a
 * single thread and controlled by the {@link InternalClusterService}. After every update the
 * {@link DiscoveryService#publish} method publishes a new version of the cluster state to all other nodes in the
 * {@link Discovery#publish} method publishes a new version of the cluster state to all other nodes in the
 * cluster. The actual publishing mechanism is delegated to the {@link Discovery#publish} method and depends on
 * the type of discovery. For example, for local discovery it is implemented by the {@link LocalDiscovery#publish}
 * method. In the Zen Discovery it is handled in the {@link PublishClusterStateAction#publish} method. The
@@ -340,6 +340,12 @@ public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> {
return this;
}

public Builder removeGlobalBlock(int blockId) {
global.removeIf(block -> block.id() == blockId);
return this;
}

public Builder addIndexBlock(String index, ClusterBlock block) {
if (!indices.containsKey(index)) {
indices.put(index, new HashSet<>());
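
The new removeGlobalBlock(int blockId) overload pairs with ClusterService.removeInitialStateBlock(int) above: callers can clear a block knowing only its id, without holding the ClusterBlock instance. A hedged sketch; the block id source is illustrative:

    // Hypothetical: clear the "no master" block by id once a master is known.
    ClusterState clearNoMasterBlock(ClusterState currentState, int noMasterBlockId) {
        ClusterBlocks blocks = ClusterBlocks.builder()
                .blocks(currentState.blocks())
                .removeGlobalBlock(noMasterBlockId)
                .build();
        return ClusterState.builder(currentState).blocks(blocks).build();
    }
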
@@ -913,11 +913,11 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
}
}
} else if ("warmers".equals(currentFieldName)) {
// TODO: do this in 4.0:
// TODO: do this in 6.0:
// throw new IllegalArgumentException("Warmers are not supported anymore - are you upgrading from 1.x?");
// ignore: warmers have been removed in 3.0 and are
// ignore: warmers have been removed in 5.0 and are
// simply ignored when upgrading from 2.x
assert Version.CURRENT.major <= 3;
assert Version.CURRENT.major <= 5;
parser.skipChildren();
} else {
// check if its a custom index metadata
@@ -19,12 +19,14 @@

package org.elasticsearch.cluster.metadata;

import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.close.CloseIndexClusterStateUpdateRequest;
import org.elasticsearch.action.admin.indices.open.OpenIndexClusterStateUpdateRequest;
import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.RestoreInProgress;
import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlockLevel;

@@ -37,11 +39,14 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

/**
 * Service responsible for submitting open/close index requests
@@ -78,7 +83,7 @@ public class MetaDataIndexStateService extends AbstractComponent {

@Override
public ClusterState execute(ClusterState currentState) {
List<String> indicesToClose = new ArrayList<>();
Set<String> indicesToClose = new HashSet<>();
for (String index : request.indices()) {
IndexMetaData indexMetaData = currentState.metaData().index(index);
if (indexMetaData == null) {
@@ -94,6 +99,28 @@ public class MetaDataIndexStateService extends AbstractComponent {
return currentState;
}

// Check if any of the indices to be closed are currently being restored from a snapshot and fail closing if such an index
// is found as closing an index that is being restored makes the index unusable (it cannot be recovered).
RestoreInProgress restore = currentState.custom(RestoreInProgress.TYPE);
if (restore != null) {
Set<String> indicesToFail = null;
for (RestoreInProgress.Entry entry : restore.entries()) {
for (ObjectObjectCursor<ShardId, RestoreInProgress.ShardRestoreStatus> shard : entry.shards()) {
if (!shard.value.state().completed()) {
if (indicesToClose.contains(shard.key.getIndexName())) {
if (indicesToFail == null) {
indicesToFail = new HashSet<>();
}
indicesToFail.add(shard.key.getIndexName());
}
}
}
}
if (indicesToFail != null) {
throw new IllegalArgumentException("Cannot close indices that are being restored: " + indicesToFail);
}
}

logger.info("closing indices [{}]", indicesAsString);

MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
@@ -46,6 +46,11 @@ import static org.elasticsearch.common.transport.TransportAddressSerializers.add
 */
public class DiscoveryNode implements Streamable, ToXContent {

public static final String DATA_ATTR = "data";
public static final String MASTER_ATTR = "master";
public static final String CLIENT_ATTR = "client";
public static final String INGEST_ATTR = "ingest";

public static boolean localNode(Settings settings) {
if (Node.NODE_LOCAL_SETTING.exists(settings)) {
return Node.NODE_LOCAL_SETTING.get(settings);
@@ -204,16 +209,6 @@ public class DiscoveryNode implements Streamable, ToXContent {
this.version = version;
}

/**
 * Should this node form a connection to the provided node.
 */
public boolean shouldConnectTo(DiscoveryNode otherNode) {
if (clientNode() && otherNode.clientNode()) {
return false;
}
return true;
}

/**
 * The address that the node can be communicated with.
 */
@@ -274,7 +269,7 @@ public class DiscoveryNode implements Streamable, ToXContent {
 * Should this node hold data (shards) or not.
 */
public boolean dataNode() {
String data = attributes.get("data");
String data = attributes.get(DATA_ATTR);
if (data == null) {
return !clientNode();
}

@@ -292,7 +287,7 @@ public class DiscoveryNode implements Streamable, ToXContent {
 * Is the node a client node or not.
 */
public boolean clientNode() {
String client = attributes.get("client");
String client = attributes.get(CLIENT_ATTR);
return client != null && Booleans.parseBooleanExact(client);
}
@@ -304,7 +299,7 @@ public class DiscoveryNode implements Streamable, ToXContent {
 * Can this node become master or not.
 */
public boolean masterNode() {
String master = attributes.get("master");
String master = attributes.get(MASTER_ATTR);
if (master == null) {
return !clientNode();
}

@@ -322,7 +317,7 @@ public class DiscoveryNode implements Streamable, ToXContent {
 * Returns a boolean that tells whether this is an ingest node or not
 */
public boolean isIngestNode() {
String ingest = attributes.get("ingest");
String ingest = attributes.get(INGEST_ATTR);
return ingest == null ? true : Booleans.parseBooleanExact(ingest);
}
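
Replacing the scattered "data"/"master"/"client"/"ingest" string literals with the *_ATTR constants gives call sites one authoritative spelling per role attribute. A hedged sketch of a caller building node attributes with them; the surrounding method is illustrative:

    // Illustrative: constructing role attributes without magic strings.
    Map<String, String> roleAttributes() {
        Map<String, String> attributes = new HashMap<>();
        attributes.put(DiscoveryNode.MASTER_ATTR, "false");
        attributes.put(DiscoveryNode.DATA_ATTR, "true");
        attributes.put(DiscoveryNode.INGEST_ATTR, "false");
        return attributes;
    }
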
@@ -29,7 +29,6 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.math.MathUtils;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardNotFoundException;

@@ -67,10 +66,6 @@ public class OperationRouting extends AbstractComponent {
return preferenceActiveShardIterator(indexShard, clusterState.nodes().localNodeId(), clusterState.nodes(), preference);
}

public GroupShardsIterator broadcastDeleteShards(ClusterState clusterState, String index) {
return indexRoutingTable(clusterState, index).groupByShardsIt();
}

public int searchShardsCount(ClusterState clusterState, String[] concreteIndices, @Nullable Map<String, Set<String>> routing) {
final Set<IndexShardRoutingTable> shards = computeTargetedShards(clusterState, concreteIndices, routing);
return shards.size();
@@ -266,7 +266,7 @@ public final class ShardRouting implements Streamable, ToXContent {
return false;
}

if (indexMetaData.activeAllocationIds(id()).isEmpty() && indexMetaData.getCreationVersion().onOrAfter(Version.V_3_0_0)) {
if (indexMetaData.activeAllocationIds(id()).isEmpty() && indexMetaData.getCreationVersion().onOrAfter(Version.V_5_0_0)) {
// when no shards with this id have ever been active for this index
return false;
}
@@ -310,7 +310,7 @@ public class AllocationService extends AbstractComponent {
}

// move shards that no longer can be allocated
changed |= moveShards(allocation);
changed |= shardsAllocators.moveShards(allocation);

// rebalance
changed |= shardsAllocators.rebalance(allocation);
@@ -327,46 +327,6 @@ public class AllocationService extends AbstractComponent {
}
}

private boolean moveShards(RoutingAllocation allocation) {
boolean changed = false;

// create a copy of the shards interleaving between nodes, and check if they can remain
List<ShardRouting> shards = new ArrayList<>();
int index = 0;
boolean found = true;
final RoutingNodes routingNodes = allocation.routingNodes();
while (found) {
found = false;
for (RoutingNode routingNode : routingNodes) {
if (index >= routingNode.size()) {
continue;
}
found = true;
shards.add(routingNode.get(index));
}
index++;
}
for (int i = 0; i < shards.size(); i++) {
ShardRouting shardRouting = shards.get(i);
// we can only move started shards...
if (!shardRouting.started()) {
continue;
}
final RoutingNode routingNode = routingNodes.node(shardRouting.currentNodeId());
Decision decision = allocation.deciders().canRemain(shardRouting, routingNode, allocation);
if (decision.type() == Decision.Type.NO) {
logger.debug("[{}][{}] allocated on [{}], but can no longer be allocated on it, moving...", shardRouting.index(), shardRouting.id(), routingNode.node());
boolean moved = shardsAllocators.move(shardRouting, routingNode, allocation);
if (!moved) {
logger.debug("[{}][{}] can't move", shardRouting.index(), shardRouting.id());
} else {
changed = true;
}
}
}
return changed;
}

private boolean electPrimariesAndUnassignedDanglingReplicas(RoutingAllocation allocation) {
boolean changed = false;
final RoutingNodes routingNodes = allocation.routingNodes();
@@ -42,6 +42,7 @@ import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.gateway.PriorityComparator;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;

@@ -49,6 +50,7 @@ import java.util.HashMap;
import java.util.HashSet;
import java.util.IdentityHashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Predicate;

@@ -119,9 +121,9 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
}

@Override
public boolean move(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
public boolean moveShards(RoutingAllocation allocation) {
final Balancer balancer = new Balancer(logger, allocation, weightFunction, threshold);
return balancer.move(shardRouting, node);
return balancer.moveShards();
}

/**
@@ -489,56 +491,93 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
}

/**
 * This function executes a move operation moving the given shard from
 * the given node to the minimal eligible node with respect to the
 * weight function. Iff the shard is moved the shard will be set to
 * Move started shards that can not be allocated to a node anymore
 *
 * For each shard to be moved this function executes a move operation
 * to the minimal eligible node with respect to the
 * weight function. If a shard is moved the shard will be set to
 * {@link ShardRoutingState#RELOCATING} and a shadow instance of this
 * shard is created with an incremented version in the state
 * {@link ShardRoutingState#INITIALIZING}.
 *
 * @return <code>true</code> iff the shard has successfully been moved.
 * @return <code>true</code> if the allocation has changed, otherwise <code>false</code>
 */
public boolean move(ShardRouting shard, RoutingNode node) {
if (nodes.isEmpty() || !shard.started()) {
/* with no nodes or a not started shard this is pointless */
public boolean moveShards() {
if (nodes.isEmpty()) {
/* with no nodes this is pointless */
return false;
}
if (logger.isTraceEnabled()) {
logger.trace("Try moving shard [{}] from [{}]", shard, node);
}
final RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned();
boolean changed = initialize(routingNodes, unassigned);
if (!changed) {
final ModelNode sourceNode = nodes.get(node.nodeId());
assert sourceNode != null;
final NodeSorter sorter = newNodeSorter();
sorter.reset(shard.getIndexName());
final ModelNode[] nodes = sorter.modelNodes;
assert sourceNode.containsShard(shard);
/*
 * the sorter holds the minimum weight node first for the shards index.
 * We now walk through the nodes until we find a node to allocate the shard.
 * This is not guaranteed to be balanced after this operation; we still try best effort to
 * allocate on the minimal eligible node.
 */

for (ModelNode currentNode : nodes) {
if (currentNode.getNodeId().equals(node.nodeId())) {
// Create a copy of the started shards interleaving between nodes, and check if they can remain. In the presence of throttling
// shard movements, the goal of this iteration order is to achieve a fairer movement of shards from the nodes that are
// offloading the shards.
List<ShardRouting> shards = new ArrayList<>();
int index = 0;
boolean found = true;
while (found) {
found = false;
for (RoutingNode routingNode : routingNodes) {
if (index >= routingNode.size()) {
continue;
}
RoutingNode target = currentNode.getRoutingNode(routingNodes);
Decision allocationDecision = allocation.deciders().canAllocate(shard, target, allocation);
Decision rebalanceDecision = allocation.deciders().canRebalance(shard, allocation);
Decision decision = new Decision.Multi().add(allocationDecision).add(rebalanceDecision);
if (decision.type() == Type.YES) { // TODO maybe we can respect throttling here too?
sourceNode.removeShard(shard);
ShardRouting targetRelocatingShard = routingNodes.relocate(shard, target.nodeId(), allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
currentNode.addShard(targetRelocatingShard, decision);
if (logger.isTraceEnabled()) {
logger.trace("Moved shard [{}] to node [{}]", shard, currentNode.getNodeId());
found = true;
ShardRouting shardRouting = routingNode.get(index);
// we can only move started shards...
if (shardRouting.started()) {
shards.add(shardRouting);
}
}
index++;
}
if (shards.isEmpty()) {
return false;
}

final RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned();
boolean changed = initialize(routingNodes, unassigned);
if (changed == false) {
final NodeSorter sorter = newNodeSorter();
final ModelNode[] modelNodes = sorter.modelNodes;
for (ShardRouting shardRouting : shards) {
final ModelNode sourceNode = nodes.get(shardRouting.currentNodeId());
assert sourceNode != null && sourceNode.containsShard(shardRouting);
final RoutingNode routingNode = sourceNode.getRoutingNode(routingNodes);
Decision decision = allocation.deciders().canRemain(shardRouting, routingNode, allocation);
if (decision.type() == Decision.Type.NO) {
logger.debug("[{}][{}] allocated on [{}], but can no longer be allocated on it, moving...", shardRouting.index(), shardRouting.id(), routingNode.node());
sorter.reset(shardRouting.getIndexName());
/*
 * the sorter holds the minimum weight node first for the shards index.
 * We now walk through the nodes until we find a node to allocate the shard.
 * This is not guaranteed to be balanced after this operation; we still try best effort to
 * allocate on the minimal eligible node.
 */
boolean moved = false;
for (ModelNode currentNode : modelNodes) {
if (currentNode == sourceNode) {
continue;
}
RoutingNode target = currentNode.getRoutingNode(routingNodes);
Decision allocationDecision = allocation.deciders().canAllocate(shardRouting, target, allocation);
Decision rebalanceDecision = allocation.deciders().canRebalance(shardRouting, allocation);
if (allocationDecision.type() == Type.YES && rebalanceDecision.type() == Type.YES) { // TODO maybe we can respect throttling here too?
Decision sourceDecision = sourceNode.removeShard(shardRouting);
ShardRouting targetRelocatingShard = routingNodes.relocate(shardRouting, target.nodeId(), allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
// re-add (now relocating shard) to source node
sourceNode.addShard(shardRouting, sourceDecision);
Decision targetDecision = new Decision.Multi().add(allocationDecision).add(rebalanceDecision);
currentNode.addShard(targetRelocatingShard, targetDecision);
if (logger.isTraceEnabled()) {
logger.trace("Moved shard [{}] to node [{}]", shardRouting, routingNode.node());
}
moved = true;
changed = true;
break;
}
}
if (moved == false) {
logger.debug("[{}][{}] can't move", shardRouting.index(), shardRouting.id());
}
changed = true;
break;
}
}
}
@@ -19,7 +19,6 @@

package org.elasticsearch.cluster.routing.allocation.allocator;

import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;

@@ -36,22 +35,22 @@ import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;
public interface ShardsAllocator {

/**
 * Applies changes on started nodes based on the implemented algorithm. For example if a
 * shard has changed to {@link ShardRoutingState#STARTED} from {@link ShardRoutingState#RELOCATING}
 * this allocator might apply some cleanups on the node that used to hold the shard.
 * @param allocation all started {@link ShardRouting shards}
 */
void applyStartedShards(StartedRerouteAllocation allocation);

/**
 * Applies changes on failed nodes based on the implemented algorithm.
 * @param allocation all failed {@link ShardRouting shards}
 */
void applyFailedShards(FailedRerouteAllocation allocation);

/**
 * Assign all unassigned shards to nodes
 *
 * @param allocation current node allocation
 * @return <code>true</code> if the allocation has changed, otherwise <code>false</code>
 */

@@ -59,19 +58,17 @@ public interface ShardsAllocator {

/**
 * Rebalancing number of shards on all nodes
 *
 * @param allocation current node allocation
 * @return <code>true</code> if the allocation has changed, otherwise <code>false</code>
 */
boolean rebalance(RoutingAllocation allocation);

/**
 * Moves a shard from the given node to other node.
 *
 * @param shardRouting the shard to move
 * @param node A node containing the shard
 * Move started shards that can not be allocated to a node anymore
 *
 * @param allocation current node allocation
 * @return <code>true</code> if the allocation has changed, otherwise <code>false</code>
 */
boolean move(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation);
boolean moveShards(RoutingAllocation allocation);
}
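
The interface change replaces the per-shard move(shard, node, allocation) with a bulk moveShards(allocation) pass, so the allocator now owns the iteration and can order moves fairly across nodes. A hedged sketch of a trivial implementation of the new contract; the class and the name of the unassigned-allocation method (inferred from the javadoc above, its signature is not shown in this diff) are assumptions:

    // Hypothetical no-op allocator illustrating the new moveShards signature.
    public class NoopShardsAllocator implements ShardsAllocator {
        public void applyStartedShards(StartedRerouteAllocation allocation) {}
        public void applyFailedShards(FailedRerouteAllocation allocation) {}
        public boolean allocateUnassigned(RoutingAllocation allocation) { return false; } // name assumed
        public boolean rebalance(RoutingAllocation allocation) { return false; }
        public boolean moveShards(RoutingAllocation allocation) { return false; } // nothing moved
    }
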
@@ -19,8 +19,6 @@

package org.elasticsearch.cluster.routing.allocation.allocator;

import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;

@@ -96,7 +94,7 @@ public class ShardsAllocators extends AbstractComponent implements ShardsAllocat
}

@Override
public boolean move(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
return allocator.move(shardRouting, node, allocation);
public boolean moveShards(RoutingAllocation allocation) {
return allocator.moveShards(allocation);
}
}
@@ -44,6 +44,7 @@ import org.elasticsearch.cluster.routing.OperationRouting;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;

@@ -64,7 +65,6 @@ import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor;
import org.elasticsearch.common.util.concurrent.PrioritizedRunnable;
import org.elasticsearch.common.util.iterable.Iterables;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoveryService;
import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

@@ -76,7 +76,9 @@ import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Queue;
import java.util.Random;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.Executor;

@@ -84,6 +86,7 @@ import java.util.concurrent.Future;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.BiConsumer;
import java.util.stream.Collectors;

import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;
@@ -97,9 +100,12 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
public static final Setting<TimeValue> CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING = Setting.positiveTimeSetting("cluster.service.reconnect_interval", TimeValue.timeValueSeconds(10), false, Setting.Scope.CLUSTER);

public static final String UPDATE_THREAD_NAME = "clusterService#updateTask";
public static final Setting<Long> NODE_ID_SEED_SETTING =
// don't use node.id.seed so it won't be seen as an attribute
Setting.longSetting("node_id.seed", 0L, Long.MIN_VALUE, false, Setting.Scope.CLUSTER);
private final ThreadPool threadPool;

private final DiscoveryService discoveryService;
private BiConsumer<ClusterChangedEvent, Discovery.AckListener> clusterStatePublisher;

private final OperationRouting operationRouting;

@@ -139,12 +145,11 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
private volatile ScheduledFuture reconnectToNodes;

@Inject
public InternalClusterService(Settings settings, DiscoveryService discoveryService, OperationRouting operationRouting, TransportService transportService,
public InternalClusterService(Settings settings, OperationRouting operationRouting, TransportService transportService,
ClusterSettings clusterSettings, ThreadPool threadPool, ClusterName clusterName, DiscoveryNodeService discoveryNodeService, Version version) {
super(settings);
this.operationRouting = operationRouting;
this.transportService = transportService;
this.discoveryService = discoveryService;
this.threadPool = threadPool;
this.clusterSettings = clusterSettings;
this.discoveryNodeService = discoveryNodeService;
@ -161,7 +166,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
|||
|
||||
localNodeMasterListeners = new LocalNodeMasterListeners(threadPool);
|
||||
|
||||
initialBlocks = ClusterBlocks.builder().addGlobalBlock(discoveryService.getNoMasterBlock());
|
||||
initialBlocks = ClusterBlocks.builder();
|
||||
|
||||
taskManager = transportService.getTaskManager();
|
||||
}
|
||||
|
@ -170,6 +175,10 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
|||
this.slowTaskLoggingThreshold = slowTaskLoggingThreshold;
|
||||
}
|
||||
|
||||
public void setClusterStatePublisher(BiConsumer<ClusterChangedEvent, Discovery.AckListener> publisher) {
|
||||
clusterStatePublisher = publisher;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void addInitialStateBlock(ClusterBlock block) throws IllegalStateException {
|
||||
if (lifecycle.started()) {
|
||||
|
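The cluster service no longer reaches into DiscoveryService to publish; whoever owns publishing now injects it as a BiConsumer. A minimal sketch of the wiring this enables, assuming Discovery declares publish(ClusterChangedEvent, Discovery.AckListener) and the usual package paths (the wiring site itself is not part of this diff):

    import org.elasticsearch.cluster.ClusterChangedEvent;
    import org.elasticsearch.cluster.service.InternalClusterService;
    import org.elasticsearch.discovery.Discovery;

    class PublisherWiring {
        // Hypothetical node-startup wiring: discovery hands its publish method
        // to the cluster service, inverting the old hard dependency.
        static void wire(InternalClusterService clusterService, Discovery discovery) {
            clusterService.setClusterStatePublisher(discovery::publish);
        }
    }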
@@ -180,14 +189,20 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe

     @Override
     public void removeInitialStateBlock(ClusterBlock block) throws IllegalStateException {
+        removeInitialStateBlock(block.id());
+    }
+
+    @Override
+    public void removeInitialStateBlock(int blockId) throws IllegalStateException {
         if (lifecycle.started()) {
             throw new IllegalStateException("can't set initial block when started");
         }
-        initialBlocks.removeGlobalBlock(block);
+        initialBlocks.removeGlobalBlock(blockId);
     }

     @Override
     protected void doStart() {
+        Objects.requireNonNull(clusterStatePublisher, "please set a cluster state publisher before starting");
         add(localNodeMasterListeners);
         add(taskManager);
         this.clusterState = ClusterState.builder(clusterState).blocks(initialBlocks).build();

@@ -195,7 +210,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
         this.reconnectToNodes = threadPool.schedule(reconnectInterval, ThreadPool.Names.GENERIC, new ReconnectToNodes());
         Map<String, String> nodeAttributes = discoveryNodeService.buildAttributes();
         // note, we rely on the fact that its a new id each time we start, see FD and "kill -9" handling
-        final String nodeId = DiscoveryService.generateNodeId(settings);
+        final String nodeId = generateNodeId(settings);
         final TransportAddress publishAddress = transportService.boundAddress().publishAddress();
         DiscoveryNode localNode = new DiscoveryNode(settings.get("node.name"), nodeId, publishAddress, nodeAttributes, version);
         DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder().put(localNode).localNodeId(localNode.id());

@@ -555,9 +570,6 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe

             // TODO, do this in parallel (and wait)
             for (DiscoveryNode node : nodesDelta.addedNodes()) {
-                if (!nodeRequiresConnection(node)) {
-                    continue;
-                }
                 try {
                     transportService.connectToNode(node);
                 } catch (Throwable e) {

@@ -572,7 +584,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
         if (newClusterState.nodes().localNodeMaster()) {
             logger.debug("publishing cluster state version [{}]", newClusterState.version());
             try {
-                discoveryService.publish(clusterChangedEvent, ackListener);
+                clusterStatePublisher.accept(clusterChangedEvent, ackListener);
             } catch (Discovery.FailedToCommitClusterStateException t) {
                 logger.warn("failing [{}]: failed to commit cluster state version [{}]", t, source, newClusterState.version());
                 proccessedListeners.forEach(task -> task.listener.onFailure(task.source, t));

@@ -809,9 +821,6 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
                 if (lifecycle.stoppedOrClosed()) {
                     return;
                 }
-                if (!nodeRequiresConnection(node)) {
-                    continue;
-                }
                 if (clusterState.nodes().nodeExists(node.id())) { // we double check existence of node since connectToNode might take time...
                     if (!transportService.nodeConnected(node)) {
                         try {

@@ -853,8 +862,9 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
         }
     }

-    private boolean nodeRequiresConnection(DiscoveryNode node) {
-        return localNode().shouldConnectTo(node);
+    public static String generateNodeId(Settings settings) {
+        Random random = Randomness.get(settings, NODE_ID_SEED_SETTING);
+        return Strings.randomBase64UUID(random);
     }

     private static class LocalNodeMasterListeners implements ClusterStateListener {
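With generateNodeId moved onto the cluster service and backed by the node_id.seed setting, a fixed seed should produce the same node id on every start. A small sketch of that property (the settings-builder method name and package paths are assumptions; they varied across 2.x-era versions):

    import org.elasticsearch.cluster.service.InternalClusterService;
    import org.elasticsearch.common.settings.Settings;

    class NodeIdSeedDemo {
        public static void main(String[] args) {
            // Same seed, same id: the reproducibility that test clusters rely on.
            Settings settings = Settings.settingsBuilder().put("node_id.seed", 42L).build();
            String first = InternalClusterService.generateNodeId(settings);
            String second = InternalClusterService.generateNodeId(settings);
            System.out.println(first.equals(second)); // expected: true
        }
    }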
@@ -18,26 +18,23 @@
  */
 package org.elasticsearch.common;

-import java.util.EnumSet;
+import org.elasticsearch.common.logging.DeprecationLogger;
+import org.elasticsearch.common.logging.Loggers;
+
 import java.util.HashSet;

 /**
  * Holds a field that can be found in a request while parsing and its different variants, which may be deprecated.
  */
 public class ParseField {

+    private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(ParseField.class));
+
     private final String camelCaseName;
     private final String underscoreName;
     private final String[] deprecatedNames;
     private String allReplacedWith = null;

-    static final EnumSet<Flag> EMPTY_FLAGS = EnumSet.noneOf(Flag.class);
-    static final EnumSet<Flag> STRICT_FLAGS = EnumSet.of(Flag.STRICT);
-
-    enum Flag {
-        STRICT
-    }
-
     public ParseField(String value, String... deprecatedNames) {
         camelCaseName = Strings.toCamelCase(value);
         underscoreName = Strings.toUnderscoreCase(value);

@@ -80,19 +77,21 @@ public class ParseField {
         return parseField;
     }

-    boolean match(String currentFieldName, EnumSet<Flag> flags) {
+    boolean match(String currentFieldName, boolean strict) {
         if (allReplacedWith == null && (currentFieldName.equals(camelCaseName) || currentFieldName.equals(underscoreName))) {
             return true;
         }
+        String msg;
         for (String depName : deprecatedNames) {
             if (currentFieldName.equals(depName)) {
-                if (flags.contains(Flag.STRICT)) {
-                    msg = "Deprecated field [" + currentFieldName + "] used, expected [" + underscoreName + "] instead";
-                    if (allReplacedWith != null) {
-                        msg = "Deprecated field [" + currentFieldName + "] used, replaced by [" + allReplacedWith + "]";
-                    }
+                msg = "Deprecated field [" + currentFieldName + "] used, expected [" + underscoreName + "] instead";
+                if (allReplacedWith != null) {
+                    msg = "Deprecated field [" + currentFieldName + "] used, replaced by [" + allReplacedWith + "]";
+                }
+                if (strict) {
                     throw new IllegalArgumentException(msg);
+                } else {
+                    DEPRECATION_LOGGER.deprecated(msg);
                 }
                 return true;
             }
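The EnumSet<Flag> plumbing collapses to one boolean: in strict mode a deprecated name throws, in lenient mode it logs through the new DeprecationLogger and still matches. A sketch of the resulting behavior (field names invented; the demo sits in org.elasticsearch.common because match is package-private):

    package org.elasticsearch.common;

    class ParseFieldMatchDemo {
        public static void main(String[] args) {
            // One current name plus a deprecated alias, invented for illustration.
            ParseField field = new ParseField("fuzziness", "edit_distance");

            System.out.println(field.match("fuzziness", false));     // true, no warning
            System.out.println(field.match("edit_distance", false)); // true, logs a deprecation warning
            field.match("edit_distance", true);                      // throws IllegalArgumentException
        }
    }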
@@ -21,29 +21,28 @@ package org.elasticsearch.common;

 import org.elasticsearch.common.settings.Settings;

-import java.util.EnumSet;
-
 /**
  * Matcher to use in combination with {@link ParseField} while parsing requests. Matches a {@link ParseField}
  * against a field name and throw deprecation exception depending on the current value of the {@link #PARSE_STRICT} setting.
  */
 public class ParseFieldMatcher {
     public static final String PARSE_STRICT = "index.query.parse.strict";
-    public static final ParseFieldMatcher EMPTY = new ParseFieldMatcher(ParseField.EMPTY_FLAGS);
-    public static final ParseFieldMatcher STRICT = new ParseFieldMatcher(ParseField.STRICT_FLAGS);
+    public static final ParseFieldMatcher EMPTY = new ParseFieldMatcher(false);
+    public static final ParseFieldMatcher STRICT = new ParseFieldMatcher(true);

-    private final EnumSet<ParseField.Flag> parseFlags;
+    private final boolean strict;

     public ParseFieldMatcher(Settings settings) {
-        if (settings.getAsBoolean(PARSE_STRICT, false)) {
-            this.parseFlags = EnumSet.of(ParseField.Flag.STRICT);
-        } else {
-            this.parseFlags = ParseField.EMPTY_FLAGS;
-        }
+        this(settings.getAsBoolean(PARSE_STRICT, false));
     }

-    public ParseFieldMatcher(EnumSet<ParseField.Flag> parseFlags) {
-        this.parseFlags = parseFlags;
+    public ParseFieldMatcher(boolean strict) {
+        this.strict = strict;
     }

+    /** Should deprecated settings be rejected? */
+    public boolean isStrict() {
+        return strict;
+    }
+
     /**

@@ -55,6 +54,6 @@ public class ParseFieldMatcher {
      * @return true whenever the parse field that we are looking for was found, false otherwise
      */
     public boolean match(String fieldName, ParseField parseField) {
-        return parseField.match(fieldName, parseFlags);
+        return parseField.match(fieldName, strict);
     }
 }
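Callers are untouched: ParseFieldMatcher.match simply forwards the boolean now, and the new isStrict() getter exposes the mode. Typical parser-side usage, sketched with an invented field:

    import org.elasticsearch.common.ParseField;
    import org.elasticsearch.common.ParseFieldMatcher;

    class MatcherUsageDemo {
        private static final ParseField QUERY_FIELD = new ParseField("query");

        public static void main(String[] args) {
            ParseFieldMatcher matcher = ParseFieldMatcher.STRICT;
            System.out.println(matcher.match("query", QUERY_FIELD)); // true
            System.out.println(matcher.isStrict());                  // true
        }
    }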
@@ -41,7 +41,7 @@ import java.util.concurrent.ThreadLocalRandom;
 * setting a reproducible seed. When running the Elasticsearch server
 * process, non-reproducible sources of randomness are provided (unless
 * a setting is provided for a module that exposes a seed setting (e.g.,
- * DiscoveryService#DISCOVERY_SEED_SETTING)).
+ * DiscoveryService#NODE_ID_SEED_SETTING)).
 */
 public final class Randomness {
     private static final Method currentMethod;
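For context on the reference being fixed here: Randomness.get(settings, seedSetting) is the hook the new generateNodeId uses; per the javadoc above, it should hand back a reproducibly seeded Random when the seed setting is present and a non-reproducible source otherwise. A sketch of the reproducible case (builder method name and package paths assumed, as before):

    import java.util.Random;

    import org.elasticsearch.cluster.service.InternalClusterService;
    import org.elasticsearch.common.Randomness;
    import org.elasticsearch.common.settings.Settings;

    class RandomnessDemo {
        public static void main(String[] args) {
            Settings seeded = Settings.settingsBuilder().put("node_id.seed", 7L).build();
            // Two Randoms built from the same seed setting should agree.
            Random r1 = Randomness.get(seeded, InternalClusterService.NODE_ID_SEED_SETTING);
            Random r2 = Randomness.get(seeded, InternalClusterService.NODE_ID_SEED_SETTING);
            System.out.println(r1.nextLong() == r2.nextLong()); // expected: true
        }
    }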
@@ -23,6 +23,7 @@ import java.io.BufferedReader;
 import java.io.Console;
 import java.io.IOException;
 import java.io.InputStreamReader;
+import java.io.PrintWriter;
 import java.nio.charset.Charset;

 import org.elasticsearch.common.SuppressForbidden;

@@ -52,6 +53,13 @@ public abstract class Terminal {
     /** The current verbosity for the terminal, defaulting to {@link Verbosity#NORMAL}. */
     private Verbosity verbosity = Verbosity.NORMAL;

+    /** The newline used when calling println. */
+    private final String lineSeparator;
+
+    protected Terminal(String lineSeparator) {
+        this.lineSeparator = lineSeparator;
+    }
+
     /** Sets the verbosity of the terminal. */
     void setVerbosity(Verbosity verbosity) {
         this.verbosity = verbosity;

@@ -63,8 +71,8 @@ public abstract class Terminal {
     /** Reads password text from the terminal input. See {@link Console#readPassword()}}. */
     public abstract char[] readSecret(String prompt);

-    /** Print a message directly to the terminal. */
-    protected abstract void doPrint(String msg);
+    /** Returns a Writer which can be used to write to the terminal directly. */
+    public abstract PrintWriter getWriter();

     /** Prints a line to the terminal at {@link Verbosity#NORMAL} verbosity level. */
     public final void println(String msg) {

@@ -74,7 +82,8 @@ public abstract class Terminal {
     /** Prints a line to the terminal at {@code verbosity} level. */
     public final void println(Verbosity verbosity, String msg) {
         if (this.verbosity.ordinal() >= verbosity.ordinal()) {
-            doPrint(msg + System.lineSeparator());
+            getWriter().print(msg + lineSeparator);
+            getWriter().flush();
         }
     }

@@ -82,14 +91,17 @@ public abstract class Terminal {

         private static final Console console = System.console();

+        ConsoleTerminal() {
+            super(System.lineSeparator());
+        }
+
         static boolean isSupported() {
             return console != null;
         }

         @Override
-        public void doPrint(String msg) {
-            console.printf("%s", msg);
-            console.flush();
+        public PrintWriter getWriter() {
+            return console.writer();
         }

         @Override

@@ -105,16 +117,25 @@ public abstract class Terminal {

     private static class SystemTerminal extends Terminal {

+        private final PrintWriter writer = newWriter();
+
+        SystemTerminal() {
+            super(System.lineSeparator());
+        }
+
+        @SuppressForbidden(reason = "Writer for System.out")
+        private static PrintWriter newWriter() {
+            return new PrintWriter(System.out);
+        }
+
         @Override
-        @SuppressForbidden(reason = "System#out")
-        public void doPrint(String msg) {
-            System.out.print(msg);
-            System.out.flush();
+        public PrintWriter getWriter() {
+            return writer;
         }

         @Override
         public String readText(String text) {
-            doPrint(text);
+            getWriter().print(text);
             BufferedReader reader = new BufferedReader(new InputStreamReader(System.in, Charset.defaultCharset()));
             try {
                 return reader.readLine();
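Swapping the abstract doPrint for getWriter() means output can be captured without redirecting System.out. A sketch of a test-only terminal against the reworked API, assuming Terminal lives at org.elasticsearch.common.cli and that readText and readSecret are the remaining abstract methods, as the hunks above suggest:

    import java.io.PrintWriter;
    import java.io.StringWriter;

    import org.elasticsearch.common.cli.Terminal;

    // Hypothetical capturing terminal for tests.
    class CapturingTerminal extends Terminal {
        private final StringWriter buffer = new StringWriter();
        private final PrintWriter writer = new PrintWriter(buffer);

        CapturingTerminal() {
            super("\n"); // fixed separator keeps assertions platform-independent
        }

        @Override
        public PrintWriter getWriter() {
            return writer;
        }

        @Override
        public String readText(String prompt) {
            return ""; // no interactive input under test
        }

        @Override
        public char[] readSecret(String prompt) {
            return new char[0];
        }

        String output() {
            return buffer.toString();
        }
    }

Because println is final and routes through getWriter(), output() sees exactly what a user would, newline handling included.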
@@ -29,7 +29,7 @@ public class ShapesAvailability {
     static {
         boolean xSPATIAL4J_AVAILABLE;
         try {
-            Class.forName("com.spatial4j.core.shape.impl.PointImpl");
+            Class.forName("org.locationtech.spatial4j.shape.impl.PointImpl");
             xSPATIAL4J_AVAILABLE = true;
         } catch (Throwable t) {
             xSPATIAL4J_AVAILABLE = false;

@@ -19,9 +19,9 @@

 package org.elasticsearch.common.geo;

-import com.spatial4j.core.context.SpatialContext;
-import com.spatial4j.core.shape.Shape;
-import com.spatial4j.core.shape.ShapeCollection;
+import org.locationtech.spatial4j.context.SpatialContext;
+import org.locationtech.spatial4j.shape.Shape;
+import org.locationtech.spatial4j.shape.ShapeCollection;

 import java.util.List;

@@ -19,7 +19,7 @@

 package org.elasticsearch.common.geo.builders;

-import com.spatial4j.core.shape.Circle;
+import org.locationtech.spatial4j.shape.Circle;
 import com.vividsolutions.jts.geom.Coordinate;

 import org.elasticsearch.common.io.stream.StreamInput;

@@ -19,7 +19,7 @@

 package org.elasticsearch.common.geo.builders;

-import com.spatial4j.core.shape.Rectangle;
+import org.locationtech.spatial4j.shape.Rectangle;
 import com.vividsolutions.jts.geom.Coordinate;

 import org.elasticsearch.common.io.stream.StreamInput;

@@ -19,7 +19,7 @@

 package org.elasticsearch.common.geo.builders;

-import com.spatial4j.core.shape.Shape;
+import org.locationtech.spatial4j.shape.Shape;

 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.common.geo.XShapeCollection;

@@ -19,7 +19,7 @@

 package org.elasticsearch.common.geo.builders;

-import com.spatial4j.core.shape.Shape;
+import org.locationtech.spatial4j.shape.Shape;
 import com.vividsolutions.jts.geom.Coordinate;
 import com.vividsolutions.jts.geom.Geometry;
 import com.vividsolutions.jts.geom.GeometryFactory;

@@ -19,7 +19,7 @@

 package org.elasticsearch.common.geo.builders;

-import com.spatial4j.core.shape.Shape;
+import org.locationtech.spatial4j.shape.Shape;
 import com.vividsolutions.jts.geom.Coordinate;
 import com.vividsolutions.jts.geom.Geometry;
 import com.vividsolutions.jts.geom.LineString;
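Every hunk in this group is the same mechanical rename: spatial4j moved to LocationTech in 0.6, so com.spatial4j.core.* becomes org.locationtech.spatial4j.* with the core segment dropped. Any straggling call site needs only its import updated, e.g.:

    // Before, spatial4j 0.5:
    //   import com.spatial4j.core.shape.Point;
    // After, spatial4j 0.6 under LocationTech:
    import org.locationtech.spatial4j.shape.Point;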
Some files were not shown because too many files have changed in this diff.