Merge branch 'master' into azure/fix-delete
commit fd602cc037

.travis.yml
@@ -1,10 +0,0 @@
-language: java
-jdk:
-  - openjdk7
-
-env:
-  - ES_TEST_LOCAL=true
-  - ES_TEST_LOCAL=false
-
-notifications:
-  email: false

@@ -18,24 +18,18 @@ gradle assemble

 == Other test options

-To disable and enable network transport, set the `Des.node.mode`.
+To disable and enable network transport, set the `tests.es.node.mode` system property.

 Use network transport:

 ------------------------------------
--Des.node.mode=network
+-Dtests.es.node.mode=network
 ------------------------------------

 Use local transport (default since 1.3):

 -------------------------------------
--Des.node.mode=local
--------------------------------------
-
-Alternatively, you can set the `ES_TEST_LOCAL` environment variable:
-
--------------------------------------
-export ES_TEST_LOCAL=true && gradle test
+-Dtests.es.node.mode=local
 -------------------------------------

 === Running Elasticsearch from a checkout

@@ -201,7 +195,7 @@ gradle test -Dtests.timeoutSuite=5000! ...
 Change the logging level of ES (not gradle)

 --------------------------------
-gradle test -Des.logger.level=DEBUG
+gradle test -Dtests.es.logger.level=DEBUG
 --------------------------------

 Print all the logging output from the test runs to the commandline
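
These `tests.es.*` flags arrive in the test JVM as plain system properties, so test code can read them directly. A minimal Java sketch (the property name comes from the docs above; the "local" fallback is an assumption based on the "default since 1.3" note):

--------------------------------
public class TestNodeMode {
    public static void main(String[] args) {
        // fallback to "local" assumed from the "default since 1.3" note above
        String mode = System.getProperty("tests.es.node.mode", "local");
        System.out.println("node mode: " + mode);
    }
}
--------------------------------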

build.gradle
@@ -81,7 +81,7 @@ subprojects {
   nexus {
     String buildSnapshot = System.getProperty('build.snapshot', 'true')
     if (buildSnapshot == 'false') {
-      Repository repo = new RepositoryBuilder().findGitDir(new File('.')).build()
+      Repository repo = new RepositoryBuilder().findGitDir(project.rootDir).build()
       String shortHash = repo.resolve('HEAD')?.name?.substring(0,7)
       repositoryUrl = project.hasProperty('build.repository') ? project.property('build.repository') : "file://${System.getenv('HOME')}/elasticsearch-releases/${version}-${shortHash}/"
     }

@@ -144,6 +144,14 @@ subprojects {
   // see https://discuss.gradle.org/t/add-custom-javadoc-option-that-does-not-take-an-argument/5959
   javadoc.options.encoding='UTF8'
   javadoc.options.addStringOption('Xdoclint:all,-missing', '-quiet')
+  /*
+  TODO: building javadocs with java 9 b118 is currently broken with weird errors, so
+  for now this is commented out...try again with the next ea build...
+  javadoc.executable = new File(project.javaHome, 'bin/javadoc')
+  if (project.javaVersion == JavaVersion.VERSION_1_9) {
+    // TODO: remove this hack! gradle should be passing this...
+    javadoc.options.addStringOption('source', '8')
+  }*/
 }
 }

@@ -84,7 +84,7 @@ dependencies {
   compile 'com.netflix.nebula:gradle-info-plugin:3.0.3'
   compile 'org.eclipse.jgit:org.eclipse.jgit:3.2.0.201312181205-r'
   compile 'com.perforce:p4java:2012.3.551082' // THIS IS SUPPOSED TO BE OPTIONAL IN THE FUTURE....
-  compile 'de.thetaphi:forbiddenapis:2.0'
+  compile 'de.thetaphi:forbiddenapis:2.1'
   compile 'com.bmuschko:gradle-nexus-plugin:2.3.1'
   compile 'org.apache.rat:apache-rat:0.11'
 }

@@ -143,7 +143,7 @@ class BuildPlugin implements Plugin<Project> {
       }

       project.rootProject.ext.javaHome = javaHome
-      project.rootProject.ext.javaVersion = javaVersion
+      project.rootProject.ext.javaVersion = javaVersionEnum
       project.rootProject.ext.buildChecksDone = true
     }
     project.targetCompatibility = minimumJava

@@ -378,7 +378,7 @@ class BuildPlugin implements Plugin<Project> {
      * -serial because we don't use java serialization.
      */
     // don't even think about passing args with -J-xxx, oracle will ask you to submit a bug report :)
-    options.compilerArgs << '-Werror' << '-Xlint:all,-path,-serial' << '-Xdoclint:all' << '-Xdoclint:-missing'
+    options.compilerArgs << '-Werror' << '-Xlint:all,-path,-serial,-options' << '-Xdoclint:all' << '-Xdoclint:-missing'
     // compile with compact 3 profile by default
     // NOTE: this is just a compile time check: does not replace testing with a compact3 JRE
     if (project.compactProfile != 'full') {

@@ -387,10 +387,13 @@ class BuildPlugin implements Plugin<Project> {
     options.encoding = 'UTF-8'
     //options.incremental = true

-    // gradle ignores target/source compatibility when it is "unnecessary", but since to compile with
-    // java 9, gradle is running in java 8, it incorrectly thinks it is unnecessary
-    assert minimumJava == JavaVersion.VERSION_1_8
-    options.compilerArgs << '-target' << '1.8' << '-source' << '1.8'
+    if (project.javaVersion == JavaVersion.VERSION_1_9) {
+      // hack until gradle supports java 9's new "-release" arg
+      assert minimumJava == JavaVersion.VERSION_1_8
+      options.compilerArgs << '-release' << '8'
+      project.sourceCompatibility = null
+      project.targetCompatibility = null
+    }
   }
 }
 }

@@ -456,7 +459,7 @@ class BuildPlugin implements Plugin<Project> {
     // default test sysprop values
     systemProperty 'tests.ifNoTests', 'fail'
     // TODO: remove setting logging level via system property
-    systemProperty 'es.logger.level', 'WARN'
+    systemProperty 'tests.logger.level', 'WARN'
     for (Map.Entry<String, String> property : System.properties.entrySet()) {
       if (property.getKey().startsWith('tests.') ||
           property.getKey().startsWith('es.')) {

@@ -87,6 +87,10 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
      * calls buildTest to actually build the test.
      */
     void handleSnippet(Snippet snippet) {
+        if (snippet.language == 'json') {
+            throw new InvalidUserDataException(
+                "$snippet: Use `js` instead of `json`.")
+        }
         if (snippet.testSetup) {
             setup(snippet)
             return

@@ -62,9 +62,8 @@ class PrecommitTasks {
     private static Task configureForbiddenApis(Project project) {
         project.pluginManager.apply(ForbiddenApisPlugin.class)
         project.forbiddenApis {
-            internalRuntimeForbidden = true
             failOnUnsupportedJava = false
-            bundledSignatures = ['jdk-unsafe', 'jdk-deprecated', 'jdk-system-out']
+            bundledSignatures = ['jdk-unsafe', 'jdk-deprecated', 'jdk-non-portable', 'jdk-system-out']
             signaturesURLs = [getClass().getResource('/forbidden/jdk-signatures.txt'),
                               getClass().getResource('/forbidden/es-all-signatures.txt')]
             suppressAnnotations = ['**.SuppressForbidden']
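
The dropped `internalRuntimeForbidden` flag and the new `jdk-non-portable` bundled signature track the forbiddenapis 2.0 to 2.1 upgrade in the dependencies hunk above, where that flag became a signature set. The `suppressAnnotations` entry lets individual code elements opt out of the checks; a hedged sketch of what an annotation matched by '**.SuppressForbidden' can look like (Elasticsearch ships its own version; the retention and elements here are illustrative):

--------------------------------
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

// Illustrative suppression annotation: forbiddenapis reads class files,
// so CLASS retention is enough for the checker to see it and skip the element.
@Retention(RetentionPolicy.CLASS)
@Target({ElementType.CONSTRUCTOR, ElementType.FIELD, ElementType.METHOD, ElementType.TYPE})
public @interface SuppressForbidden {
    String reason();
}
--------------------------------

A method that legitimately prints to System.out, for example, would carry @SuppressForbidden(reason = "CLI output") to escape the jdk-system-out signatures.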

@@ -203,8 +203,7 @@ public class ThirdPartyAuditTask extends AntTask {
         Set<String> sheistySet = getSheistyClasses(tmpDir.toPath());

         try {
-            ant.thirdPartyAudit(internalRuntimeForbidden: false,
-                failOnUnsupportedJava: false,
+            ant.thirdPartyAudit(failOnUnsupportedJava: false,
                 failOnMissingClasses: false,
                 signaturesFile: new File(getClass().getResource('/forbidden/third-party-audit.txt').toURI()),
                 classpath: classpath.asPath) {

@@ -129,18 +129,18 @@ class NodeInfo {
         }

         env = [ 'JAVA_HOME' : project.javaHome ]
-        args.addAll("-E", "es.node.portsfile=true")
+        args.addAll("-E", "node.portsfile=true")
         String collectedSystemProperties = config.systemProperties.collect { key, value -> "-D${key}=${value}" }.join(" ")
         String esJavaOpts = config.jvmArgs.isEmpty() ? collectedSystemProperties : collectedSystemProperties + " " + config.jvmArgs
         env.put('ES_JAVA_OPTS', esJavaOpts)
         for (Map.Entry<String, String> property : System.properties.entrySet()) {
-            if (property.getKey().startsWith('es.')) {
+            if (property.key.startsWith('tests.es.')) {
                 args.add("-E")
-                args.add("${property.getKey()}=${property.getValue()}")
+                args.add("${property.key.substring('tests.es.'.size())}=${property.value}")
             }
         }
         env.put('ES_JVM_OPTIONS', new File(confDir, 'jvm.options'))
-        args.addAll("-E", "es.path.conf=${confDir}")
+        args.addAll("-E", "path.conf=${confDir}")
         if (Os.isFamily(Os.FAMILY_WINDOWS)) {
             args.add('"') // end the entire command, quoted
         }
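
The rename means only properties explicitly namespaced `tests.es.` are forwarded to the node, with the prefix stripped before they become `-E` settings. A small Java sketch of that filter-and-strip step (illustrative only; the real logic above is Groovy inside NodeInfo):

--------------------------------
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Properties;

public class PrefixForwarder {
    /** Turns tests.es.* system properties into -E key=value node arguments. */
    static List<String> forward(Properties props) {
        List<String> args = new ArrayList<>();
        for (Map.Entry<Object, Object> e : props.entrySet()) {
            String key = e.getKey().toString();
            if (key.startsWith("tests.es.")) {
                args.add("-E");
                // strip the prefix: tests.es.node.mode -> node.mode
                args.add(key.substring("tests.es.".length()) + "=" + e.getValue());
            }
        }
        return args;
    }

    public static void main(String[] ignored) {
        Properties props = new Properties();
        props.setProperty("tests.es.node.mode", "network");
        props.setProperty("es.node.mode", "local"); // no longer forwarded
        System.out.println(forward(props)); // [-E, node.mode=network]
    }
}
--------------------------------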

@@ -37,8 +37,6 @@
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]repositories[/\\]put[/\\]TransportPutRepositoryAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]repositories[/\\]verify[/\\]TransportVerifyRepositoryAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]repositories[/\\]verify[/\\]VerifyRepositoryRequestBuilder.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]reroute[/\\]ClusterRerouteRequest.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]reroute[/\\]ClusterRerouteRequestBuilder.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]reroute[/\\]TransportClusterRerouteAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]settings[/\\]ClusterUpdateSettingsAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]settings[/\\]ClusterUpdateSettingsRequestBuilder.java" checks="LineLength" />

@@ -179,12 +177,6 @@
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]SimulatePipelineRequest.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]SimulatePipelineRequestBuilder.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]SimulatePipelineTransportAction.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]percolate[/\\]MultiPercolateRequest.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]percolate[/\\]MultiPercolateRequestBuilder.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]percolate[/\\]PercolateShardResponse.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]percolate[/\\]TransportMultiPercolateAction.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]percolate[/\\]TransportPercolateAction.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]percolate[/\\]TransportShardMultiPercolateAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]MultiSearchRequestBuilder.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]SearchPhaseExecutionException.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]SearchResponse.java" checks="LineLength" />

@@ -453,9 +445,6 @@
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]object[/\\]ObjectMapper.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]object[/\\]RootObjectMapper.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]merge[/\\]MergeStats.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]percolator[/\\]ExtractQueryTermsService.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]percolator[/\\]PercolatorFieldMapper.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]percolator[/\\]PercolatorQueriesRegistry.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]AbstractQueryBuilder.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]MatchQueryParser.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]QueryBuilders.java" checks="LineLength" />

@@ -520,7 +509,6 @@
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]jvm[/\\]JvmStats.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]node[/\\]Node.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]node[/\\]internal[/\\]InternalSettingsPreparer.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]PercolatorQuery.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]DummyPluginInfo.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]PluginsService.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]RemovePluginCommand.java" checks="LineLength" />

@@ -566,7 +554,6 @@
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]cat[/\\]RestThreadPoolAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]get[/\\]RestMultiGetAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]index[/\\]RestIndexAction.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]percolate[/\\]RestPercolateAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]script[/\\]RestDeleteIndexedScriptAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]script[/\\]RestPutIndexedScriptAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]search[/\\]RestClearScrollAction.java" checks="LineLength" />

@@ -745,7 +732,6 @@
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]SimulatePipelineRequestParsingTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]SimulatePipelineResponseTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]WriteableIngestDocumentTests.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]percolate[/\\]MultiPercolatorRequestTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]MultiSearchRequestTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]SearchRequestBuilderTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]AutoCreateIndexTests.java" checks="LineLength" />

@@ -981,8 +967,6 @@
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]typelevels[/\\]ParseDocumentTypeLevelsTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]update[/\\]UpdateMappingOnClusterIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]update[/\\]UpdateMappingTests.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]percolator[/\\]PercolatorFieldMapperTests.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]AbstractQueryTestCase.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]BoolQueryBuilderTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]BoostingQueryBuilderTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]CommonTermsQueryBuilderTests.java" checks="LineLength" />

@@ -1071,9 +1055,6 @@
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]os[/\\]OsProbeTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]nodesinfo[/\\]NodeInfoStreamingTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]options[/\\]detailederrors[/\\]DetailedErrorsEnabledIT.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]MultiPercolatorIT.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]PercolatorIT.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]PercolatorQueryTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]PluginInfoTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]PluginsServiceTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]recovery[/\\]FullRollingRestartIT.java" checks="LineLength" />

@@ -1222,6 +1203,16 @@
 <suppress files="modules[/\\]lang-mustache[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]package-info.java" checks="LineLength" />
 <suppress files="modules[/\\]lang-mustache[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]mustache[/\\]MustacheScriptEngineTests.java" checks="LineLength" />
 <suppress files="modules[/\\]lang-mustache[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]mustache[/\\]MustacheTests.java" checks="LineLength" />
+<suppress files="modules[/\\]percolator[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]MultiPercolateRequest.java" checks="LineLength" />
+<suppress files="modules[/\\]percolator[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]MultiPercolateRequestBuilder.java" checks="LineLength" />
+<suppress files="modules[/\\]percolator[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]PercolateShardResponse.java" checks="LineLength" />
+<suppress files="modules[/\\]percolator[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]TransportMultiPercolateAction.java" checks="LineLength" />
+<suppress files="modules[/\\]percolator[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]TransportPercolateAction.java" checks="LineLength" />
+<suppress files="modules[/\\]percolator[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]TransportShardMultiPercolateAction.java" checks="LineLength" />
+<suppress files="modules[/\\]percolator[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]RestPercolateAction.java" checks="LineLength" />
+<suppress files="modules[/\\]percolator[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]MultiPercolatorIT.java" checks="LineLength" />
+<suppress files="modules[/\\]percolator[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]PercolatorIT.java" checks="LineLength" />
+<suppress files="modules[/\\]percolator[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]MultiPercolatorRequestTests.java" checks="LineLength" />
 <suppress files="plugins[/\\]analysis-icu[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]IcuCollationTokenFilterFactory.java" checks="LineLength" />
 <suppress files="plugins[/\\]analysis-icu[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]IcuFoldingTokenFilterFactory.java" checks="LineLength" />
 <suppress files="plugins[/\\]analysis-icu[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]IcuNormalizerTokenFilterFactory.java" checks="LineLength" />

@@ -1232,13 +1223,6 @@
 <suppress files="plugins[/\\]analysis-phonetic[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]PhoneticTokenFilterFactory.java" checks="LineLength" />
 <suppress files="plugins[/\\]analysis-smartcn[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]SimpleSmartChineseAnalysisTests.java" checks="LineLength" />
 <suppress files="plugins[/\\]analysis-stempel[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]PolishAnalysisTests.java" checks="LineLength" />
-<suppress files="plugins[/\\]delete-by-query[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]deletebyquery[/\\]DeleteByQueryRequest.java" checks="LineLength" />
-<suppress files="plugins[/\\]delete-by-query[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]deletebyquery[/\\]DeleteByQueryRequestBuilder.java" checks="LineLength" />
-<suppress files="plugins[/\\]delete-by-query[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]deletebyquery[/\\]DeleteByQueryResponse.java" checks="LineLength" />
-<suppress files="plugins[/\\]delete-by-query[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]deletebyquery[/\\]TransportDeleteByQueryAction.java" checks="LineLength" />
-<suppress files="plugins[/\\]delete-by-query[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]deletebyquery[/\\]IndexDeleteByQueryResponseTests.java" checks="LineLength" />
-<suppress files="plugins[/\\]delete-by-query[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]deletebyquery[/\\]TransportDeleteByQueryActionTests.java" checks="LineLength" />
-<suppress files="plugins[/\\]delete-by-query[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugin[/\\]deletebyquery[/\\]DeleteByQueryTests.java" checks="LineLength" />
 <suppress files="plugins[/\\]discovery-azure[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cloud[/\\]azure[/\\]AbstractAzureTestCase.java" checks="LineLength" />
 <suppress files="plugins[/\\]discovery-azure[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]azure[/\\]AzureMinimumMasterNodesTests.java" checks="LineLength" />
 <suppress files="plugins[/\\]discovery-azure[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]azure[/\\]AzureSimpleTests.java" checks="LineLength" />

@@ -1309,6 +1293,7 @@
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]cli[/\\]CliToolTestCase.java" checks="LineLength" />
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]MockBigArrays.java" checks="LineLength" />
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]script[/\\]NativeSignificanceScoreScriptWithParams.java" checks="LineLength" />
+<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]AbstractQueryTestCase.java" checks="LineLength" />
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]BackgroundIndexer.java" checks="LineLength" />
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]CompositeTestCluster.java" checks="LineLength" />
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]CorruptionUtils.java" checks="LineLength" />

@@ -1335,7 +1320,6 @@
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]junit[/\\]listeners[/\\]LoggingListener.java" checks="LineLength" />
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]ESRestTestCase.java" checks="LineLength" />
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]RestTestExecutionContext.java" checks="LineLength" />
-<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]client[/\\]RestClient.java" checks="LineLength" />
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]client[/\\]http[/\\]HttpRequestBuilder.java" checks="LineLength" />
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]json[/\\]JsonPath.java" checks="LineLength" />
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]parser[/\\]GreaterThanEqualToParser.java" checks="LineLength" />

@@ -1,4 +1,4 @@
-elasticsearch = 5.0.0
+elasticsearch = 5.0.0-alpha3
 lucene = 6.0.0

 # optional dependencies

@@ -13,9 +13,7 @@ jna = 4.1.0
 # test dependencies
 randomizedrunner = 2.3.2
 junit = 4.11
-# TODO: Upgrade httpclient to a version > 4.5.1 once released. Then remove o.e.test.rest.client.StrictHostnameVerifier* and use
-# DefaultHostnameVerifier instead since we no longer need to workaround https://issues.apache.org/jira/browse/HTTPCLIENT-1698
-httpclient = 4.3.6
-httpcore = 4.3.3
+httpclient = 4.5.2
+httpcore = 4.4.4
 commonslogging = 1.1.3
 commonscodec = 1.10

@@ -16,25 +16,22 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.elasticsearch.index.percolator;
+package org.apache.log4j;

-import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.index.Index;
-
-import java.io.IOException;
+import org.apache.log4j.helpers.ThreadLocalMap;

 /**
- * Exception during indexing a percolator query.
+ * Log4j 1.2 MDC breaks because it parses java.version incorrectly (does not handle new java9 versioning).
+ *
+ * This hack fixes up the pkg private members as if it had detected the java version correctly.
  */
-public class PercolatorException extends ElasticsearchException {
+public class Java9Hack {

-    public PercolatorException(Index index, String msg, Throwable cause) {
-        super(msg, cause);
-        setIndex(index);
-    }
-
-    public PercolatorException(StreamInput in) throws IOException{
-        super(in);
+    public static void fixLog4j() {
+        if (MDC.mdc.tlm == null) {
+            MDC.mdc.java1 = false;
+            MDC.mdc.tlm = new ThreadLocalMap();
+        }
     }
 }
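
Java9Hack lives in the org.apache.log4j package because MDC.mdc and its tlm/java1 members are package private; that is the only way to reach them. The diff does not show where fixLog4j() is invoked, but a plausible call site would run it once before any MDC use, for example (hypothetical wiring, not from this commit):

--------------------------------
import org.apache.log4j.Java9Hack;

public class LoggingBootstrap {
    static {
        // only needed on java 9, where log4j 1.2 misparses java.version
        if (System.getProperty("java.version").startsWith("9")) {
            Java9Hack.fixLog4j();
        }
    }

    public static void main(String[] args) {
        System.out.println("log4j MDC patched if running on java 9");
    }
}
--------------------------------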

@@ -21,7 +21,6 @@ package org.elasticsearch;

 import org.elasticsearch.action.support.replication.ReplicationOperation;
 import org.elasticsearch.cluster.action.shard.ShardStateAction;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;

@@ -681,8 +680,6 @@ public class ElasticsearchException extends RuntimeException implements ToXContent {
                 org.elasticsearch.index.shard.IndexShardRecoveryException::new, 106),
         REPOSITORY_MISSING_EXCEPTION(org.elasticsearch.repositories.RepositoryMissingException.class,
                 org.elasticsearch.repositories.RepositoryMissingException::new, 107),
-        PERCOLATOR_EXCEPTION(org.elasticsearch.index.percolator.PercolatorException.class,
-                org.elasticsearch.index.percolator.PercolatorException::new, 108),
         DOCUMENT_SOURCE_MISSING_EXCEPTION(org.elasticsearch.index.engine.DocumentSourceMissingException.class,
                 org.elasticsearch.index.engine.DocumentSourceMissingException::new, 109),
         FLUSH_NOT_ALLOWED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.FlushNotAllowedEngineException.class,

@@ -75,9 +75,9 @@ public class Version {
     public static final Version V_5_0_0_alpha1 = new Version(V_5_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
     public static final int V_5_0_0_alpha2_ID = 5000002;
     public static final Version V_5_0_0_alpha2 = new Version(V_5_0_0_alpha2_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
-    public static final int V_5_0_0_ID = 5000099;
-    public static final Version V_5_0_0 = new Version(V_5_0_0_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
-    public static final Version CURRENT = V_5_0_0;
+    public static final int V_5_0_0_alpha3_ID = 5000003;
+    public static final Version V_5_0_0_alpha3 = new Version(V_5_0_0_alpha3_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
+    public static final Version CURRENT = V_5_0_0_alpha3;

     static {
         assert CURRENT.luceneVersion.equals(org.apache.lucene.util.Version.LATEST) : "Version must be upgraded to ["

@@ -90,8 +90,8 @@ public class Version {

     public static Version fromId(int id) {
         switch (id) {
-            case V_5_0_0_ID:
-                return V_5_0_0;
+            case V_5_0_0_alpha3_ID:
+                return V_5_0_0_alpha3;
             case V_5_0_0_alpha2_ID:
                 return V_5_0_0_alpha2;
             case V_5_0_0_alpha1_ID:
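
The constants above suggest the ID scheme: two decimal digits per component, major/minor/revision/build, so 5000003 is 5.0.0 build 3 (alpha3) and the removed 5000099 is 5.0.0 with the build value 99 that marks a final release. A sketch decoding an ID under that assumption:

--------------------------------
public class VersionId {
    public static void main(String[] args) {
        int id = 5000003; // V_5_0_0_alpha3_ID from the hunk above
        int major = id / 1000000;
        int minor = (id / 10000) % 100;
        int revision = (id / 100) % 100;
        int build = id % 100; // 99 would mean a GA release
        System.out.printf("%d.%d.%d build %d%n", major, minor, revision, build);
    }
}
--------------------------------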

@@ -165,10 +165,6 @@ import org.elasticsearch.action.ingest.SimulatePipelineAction;
 import org.elasticsearch.action.ingest.SimulatePipelineTransportAction;
 import org.elasticsearch.action.main.MainAction;
 import org.elasticsearch.action.main.TransportMainAction;
-import org.elasticsearch.action.percolate.MultiPercolateAction;
-import org.elasticsearch.action.percolate.PercolateAction;
-import org.elasticsearch.action.percolate.TransportMultiPercolateAction;
-import org.elasticsearch.action.percolate.TransportPercolateAction;
 import org.elasticsearch.action.search.ClearScrollAction;
 import org.elasticsearch.action.search.MultiSearchAction;
 import org.elasticsearch.action.search.SearchAction;

@@ -332,8 +328,6 @@ public class ActionModule extends AbstractModule {
         registerAction(SearchAction.INSTANCE, TransportSearchAction.class);
         registerAction(SearchScrollAction.INSTANCE, TransportSearchScrollAction.class);
         registerAction(MultiSearchAction.INSTANCE, TransportMultiSearchAction.class);
-        registerAction(PercolateAction.INSTANCE, TransportPercolateAction.class);
-        registerAction(MultiPercolateAction.INSTANCE, TransportMultiPercolateAction.class);
         registerAction(ExplainAction.INSTANCE, TransportExplainAction.class);
         registerAction(ClearScrollAction.INSTANCE, TransportClearScrollAction.class);
         registerAction(RecoveryAction.INSTANCE, TransportRecoveryAction.class);

@@ -42,15 +42,18 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable {

     private final ShardId shard;
     private final boolean primary;
+    private final boolean hasPendingAsyncFetch;
     private final String assignedNodeId;
     private final UnassignedInfo unassignedInfo;
     private final long remainingDelayMillis;
     private final Map<DiscoveryNode, NodeExplanation> nodeExplanations;

     public ClusterAllocationExplanation(ShardId shard, boolean primary, @Nullable String assignedNodeId, long remainingDelayMillis,
-                                        @Nullable UnassignedInfo unassignedInfo, Map<DiscoveryNode, NodeExplanation> nodeExplanations) {
+                                        @Nullable UnassignedInfo unassignedInfo, boolean hasPendingAsyncFetch,
+                                        Map<DiscoveryNode, NodeExplanation> nodeExplanations) {
         this.shard = shard;
         this.primary = primary;
+        this.hasPendingAsyncFetch = hasPendingAsyncFetch;
         this.assignedNodeId = assignedNodeId;
         this.unassignedInfo = unassignedInfo;
         this.remainingDelayMillis = remainingDelayMillis;

@@ -60,6 +63,7 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable {
     public ClusterAllocationExplanation(StreamInput in) throws IOException {
         this.shard = ShardId.readShardId(in);
         this.primary = in.readBoolean();
+        this.hasPendingAsyncFetch = in.readBoolean();
         this.assignedNodeId = in.readOptionalString();
         this.unassignedInfo = in.readOptionalWriteable(UnassignedInfo::new);
         this.remainingDelayMillis = in.readVLong();

@@ -77,6 +81,7 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable {
     public void writeTo(StreamOutput out) throws IOException {
         this.getShard().writeTo(out);
         out.writeBoolean(this.isPrimary());
+        out.writeBoolean(this.isStillFetchingShardData());
         out.writeOptionalString(this.getAssignedNodeId());
         out.writeOptionalWriteable(this.getUnassignedInfo());
         out.writeVLong(remainingDelayMillis);
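
The StreamInput constructor and writeTo() insert the new boolean at the same position because this wire format is purely positional: there are no field tags, so the reader must consume fields in exactly the order the writer produced them. The same invariant, demonstrated with plain java.io streams (a stand-in, not the Elasticsearch stream classes):

--------------------------------
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class PositionalStream {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
            out.writeBoolean(true);   // primary
            out.writeBoolean(false);  // hasPendingAsyncFetch, the new field
            out.writeUTF("node-1");   // assignedNodeId
        }
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            boolean primary = in.readBoolean();
            boolean pendingFetch = in.readBoolean(); // read second, exactly as written
            String nodeId = in.readUTF();
            System.out.println(primary + " " + pendingFetch + " " + nodeId);
        }
    }
}
--------------------------------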

@@ -97,6 +102,11 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable {
         return this.primary;
     }

+    /** Return true if shard data is still being fetched for the allocation */
+    public boolean isStillFetchingShardData() {
+        return this.hasPendingAsyncFetch;
+    }
+
     /** Return true if the shard is assigned to a node */
     public boolean isAssigned() {
         return this.assignedNodeId != null;

@@ -138,6 +148,7 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable {
         if (assignedNodeId != null) {
             builder.field("assigned_node_id", this.assignedNodeId);
         }
+        builder.field("shard_state_fetch_pending", this.hasPendingAsyncFetch);
         // If we have unassigned info, show that
         if (unassignedInfo != null) {
             unassignedInfo.toXContent(builder, params);

@@ -50,6 +50,7 @@ import org.elasticsearch.common.collect.ImmutableOpenIntMap;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.gateway.GatewayAllocator;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;


@@ -69,19 +70,22 @@ public class TransportClusterAllocationExplainAction
     private final AllocationDeciders allocationDeciders;
     private final ShardsAllocator shardAllocator;
     private final TransportIndicesShardStoresAction shardStoresAction;
+    private final GatewayAllocator gatewayAllocator;

     @Inject
     public TransportClusterAllocationExplainAction(Settings settings, TransportService transportService, ClusterService clusterService,
                                                    ThreadPool threadPool, ActionFilters actionFilters,
                                                    IndexNameExpressionResolver indexNameExpressionResolver,
                                                    ClusterInfoService clusterInfoService, AllocationDeciders allocationDeciders,
-                                                   ShardsAllocator shardAllocator, TransportIndicesShardStoresAction shardStoresAction) {
+                                                   ShardsAllocator shardAllocator, TransportIndicesShardStoresAction shardStoresAction,
+                                                   GatewayAllocator gatewayAllocator) {
         super(settings, ClusterAllocationExplainAction.NAME, transportService, clusterService, threadPool, actionFilters,
               indexNameExpressionResolver, ClusterAllocationExplainRequest::new);
         this.clusterInfoService = clusterInfoService;
         this.allocationDeciders = allocationDeciders;
         this.shardAllocator = shardAllocator;
         this.shardStoresAction = shardStoresAction;
+        this.gatewayAllocator = gatewayAllocator;
     }

     @Override

@@ -130,7 +134,8 @@ public class TransportClusterAllocationExplainAction
                                                       Float nodeWeight,
                                                       IndicesShardStoresResponse.StoreStatus storeStatus,
                                                       String assignedNodeId,
-                                                      Set<String> activeAllocationIds) {
+                                                      Set<String> activeAllocationIds,
+                                                      boolean hasPendingAsyncFetch) {
         final ClusterAllocationExplanation.FinalDecision finalDecision;
         final ClusterAllocationExplanation.StoreCopy storeCopy;
         final String finalExplanation;

@@ -161,6 +166,19 @@ public class TransportClusterAllocationExplainAction
         if (node.getId().equals(assignedNodeId)) {
             finalDecision = ClusterAllocationExplanation.FinalDecision.ALREADY_ASSIGNED;
             finalExplanation = "the shard is already assigned to this node";
+        } else if (hasPendingAsyncFetch &&
+                   shard.primary() == false &&
+                   shard.unassigned() &&
+                   shard.allocatedPostIndexCreate(indexMetaData) &&
+                   nodeDecision.type() != Decision.Type.YES) {
+            finalExplanation = "the shard cannot be assigned because allocation deciders return a " + nodeDecision.type().name() +
+                               " decision and the shard's state is still being fetched";
+            finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
+        } else if (hasPendingAsyncFetch &&
+                   shard.unassigned() &&
+                   shard.allocatedPostIndexCreate(indexMetaData)) {
+            finalExplanation = "the shard's state is still being fetched so it cannot be allocated";
+            finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
         } else if (shard.primary() && shard.unassigned() && shard.allocatedPostIndexCreate(indexMetaData) &&
                    storeCopy == ClusterAllocationExplanation.StoreCopy.STALE) {
             finalExplanation = "the copy of the shard is stale, allocation ids do not match";

@@ -180,6 +198,7 @@ public class TransportClusterAllocationExplainAction
             finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
             finalExplanation = "the shard cannot be assigned because one or more allocation decider returns a 'NO' decision";
         } else {
+            // TODO: handle throttling decision better here
            finalDecision = ClusterAllocationExplanation.FinalDecision.YES;
             if (storeCopy == ClusterAllocationExplanation.StoreCopy.AVAILABLE) {
                 finalExplanation = "the shard can be assigned and the node contains a valid copy of the shard data";

@@ -198,7 +217,8 @@ public class TransportClusterAllocationExplainAction
      */
     public static ClusterAllocationExplanation explainShard(ShardRouting shard, RoutingAllocation allocation, RoutingNodes routingNodes,
                                                             boolean includeYesDecisions, ShardsAllocator shardAllocator,
-                                                            List<IndicesShardStoresResponse.StoreStatus> shardStores) {
+                                                            List<IndicesShardStoresResponse.StoreStatus> shardStores,
+                                                            GatewayAllocator gatewayAllocator) {
         // don't short circuit deciders, we want a full explanation
         allocation.debugDecision(true);
         // get the existing unassigned info if available

@@ -238,11 +258,12 @@ public class TransportClusterAllocationExplainAction
             Float weight = weights.get(node);
             IndicesShardStoresResponse.StoreStatus storeStatus = nodeToStatus.get(node);
             NodeExplanation nodeExplanation = calculateNodeExplanation(shard, indexMetaData, node, decision, weight,
-                storeStatus, shard.currentNodeId(), indexMetaData.activeAllocationIds(shard.getId()));
+                storeStatus, shard.currentNodeId(), indexMetaData.activeAllocationIds(shard.getId()),
+                allocation.hasPendingAsyncFetch());
             explanations.put(node, nodeExplanation);
         }
-        return new ClusterAllocationExplanation(shard.shardId(), shard.primary(),
-            shard.currentNodeId(), remainingDelayMillis, ui, explanations);
+        return new ClusterAllocationExplanation(shard.shardId(), shard.primary(), shard.currentNodeId(),
+            remainingDelayMillis, ui, gatewayAllocator.hasFetchPending(shard.shardId(), shard.primary()), explanations);
     }

     @Override

@@ -250,7 +271,7 @@ public class TransportClusterAllocationExplainAction
                                    final ActionListener<ClusterAllocationExplainResponse> listener) {
         final RoutingNodes routingNodes = state.getRoutingNodes();
         final RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, state,
-            clusterInfoService.getClusterInfo(), System.nanoTime());
+            clusterInfoService.getClusterInfo(), System.nanoTime(), false);

         ShardRouting foundShard = null;
         if (request.useAnyUnassignedShard()) {

@@ -297,7 +318,7 @@ public class TransportClusterAllocationExplainAction
                     shardStoreResponse.getStoreStatuses().get(shardRouting.getIndexName());
                 List<IndicesShardStoresResponse.StoreStatus> shardStoreStatus = shardStatuses.get(shardRouting.id());
                 ClusterAllocationExplanation cae = explainShard(shardRouting, allocation, routingNodes,
-                    request.includeYesDecisions(), shardAllocator, shardStoreStatus);
+                    request.includeYesDecisions(), shardAllocator, shardStoreStatus, gatewayAllocator);
                 listener.onResponse(new ClusterAllocationExplainResponse(cae));
             }

@@ -19,28 +19,24 @@

 package org.elasticsearch.action.admin.cluster.reroute;

-import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.support.master.AcknowledgedRequest;
 import org.elasticsearch.cluster.routing.allocation.command.AllocationCommand;
-import org.elasticsearch.cluster.routing.allocation.command.AllocationCommandRegistry;
 import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
-import org.elasticsearch.common.ParseFieldMatcher;
-import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.xcontent.XContentHelper;
-import org.elasticsearch.common.xcontent.XContentParser;

 import java.io.IOException;
+import java.util.Objects;

 /**
  * Request to submit cluster reroute allocation commands
  */
 public class ClusterRerouteRequest extends AcknowledgedRequest<ClusterRerouteRequest> {
-    AllocationCommands commands = new AllocationCommands();
-    boolean dryRun;
-    boolean explain;
+    private AllocationCommands commands = new AllocationCommands();
+    private boolean dryRun;
+    private boolean explain;
+    private boolean retryFailed;

     public ClusterRerouteRequest() {
     }

@@ -81,6 +77,15 @@ public class ClusterRerouteRequest extends AcknowledgedRequest<ClusterRerouteRequest> {
         return this;
     }

+    /**
+     * Sets the retry failed flag (defaults to <tt>false</tt>). If true, the
+     * request will retry allocating shards that can't currently be allocated due to too many allocation failures.
+     */
+    public ClusterRerouteRequest setRetryFailed(boolean retryFailed) {
+        this.retryFailed = retryFailed;
+        return this;
+    }
+
     /**
      * Returns the current explain flag
      */

@@ -88,41 +93,27 @@ public class ClusterRerouteRequest extends AcknowledgedRequest<ClusterRerouteRequest> {
         return this.explain;
     }

+    /**
+     * Returns the current retry failed flag
+     */
+    public boolean isRetryFailed() {
+        return this.retryFailed;
+    }
+
     /**
      * Set the allocation commands to execute.
      */
-    public ClusterRerouteRequest commands(AllocationCommand... commands) {
-        this.commands = new AllocationCommands(commands);
+    public ClusterRerouteRequest commands(AllocationCommands commands) {
+        this.commands = commands;
         return this;
     }

     /**
-     * Sets the source for the request.
+     * Returns the allocation commands to execute
      */
-    public ClusterRerouteRequest source(BytesReference source, AllocationCommandRegistry registry, ParseFieldMatcher parseFieldMatcher)
-            throws Exception {
-        try (XContentParser parser = XContentHelper.createParser(source)) {
-            XContentParser.Token token;
-            String currentFieldName = null;
-            while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
-                if (token == XContentParser.Token.FIELD_NAME) {
-                    currentFieldName = parser.currentName();
-                } else if (token == XContentParser.Token.START_ARRAY) {
-                    if ("commands".equals(currentFieldName)) {
-                        this.commands = AllocationCommands.fromXContent(parser, parseFieldMatcher, registry);
-                    } else {
-                        throw new ElasticsearchParseException("failed to parse reroute request, got start array with wrong field name [{}]", currentFieldName);
-                    }
-                } else if (token.isValue()) {
-                    if ("dry_run".equals(currentFieldName) || "dryRun".equals(currentFieldName)) {
-                        dryRun = parser.booleanValue();
-                    } else {
-                        throw new ElasticsearchParseException("failed to parse reroute request, got value with wrong field name [{}]", currentFieldName);
-                    }
-                }
-            }
-        }
-        return this;
+    public AllocationCommands getCommands() {
+        return commands;
     }

     @Override

@@ -136,6 +127,7 @@ public class ClusterRerouteRequest extends AcknowledgedRequest<ClusterRerouteRequest> {
         commands = AllocationCommands.readFrom(in);
         dryRun = in.readBoolean();
         explain = in.readBoolean();
+        retryFailed = in.readBoolean();
         readTimeout(in);
     }


@@ -145,6 +137,28 @@ public class ClusterRerouteRequest extends AcknowledgedRequest<ClusterRerouteRequest> {
         AllocationCommands.writeTo(commands, out);
         out.writeBoolean(dryRun);
         out.writeBoolean(explain);
+        out.writeBoolean(retryFailed);
         writeTimeout(out);
     }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (obj == null || getClass() != obj.getClass()) {
+            return false;
+        }
+        ClusterRerouteRequest other = (ClusterRerouteRequest) obj;
+        // Override equals and hashCode for testing
+        return Objects.equals(commands, other.commands) &&
+                Objects.equals(dryRun, other.dryRun) &&
+                Objects.equals(explain, other.explain) &&
+                Objects.equals(timeout, other.timeout) &&
+                Objects.equals(retryFailed, other.retryFailed) &&
+                Objects.equals(masterNodeTimeout, other.masterNodeTimeout);
+    }
+
+    @Override
+    public int hashCode() {
+        // Override equals and hashCode for testing
+        return Objects.hash(commands, dryRun, explain, timeout, retryFailed, masterNodeTimeout);
+    }
 }
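
The "Override equals and hashCode for testing" comments point at the usual payoff: a test can serialize a request, deserialize it, and assert equality of the round-tripped copy. A miniature of that pattern (hypothetical stand-in types, not the Elasticsearch stream classes):

--------------------------------
import java.util.Objects;

public class RoundTripCheck {
    // Tiny request with value-based equals/hashCode, like the hunk above adds.
    static class MiniRequest {
        final boolean dryRun, explain, retryFailed;
        MiniRequest(boolean dryRun, boolean explain, boolean retryFailed) {
            this.dryRun = dryRun; this.explain = explain; this.retryFailed = retryFailed;
        }
        @Override public boolean equals(Object o) {
            if (o == null || getClass() != o.getClass()) return false;
            MiniRequest other = (MiniRequest) o;
            return dryRun == other.dryRun && explain == other.explain && retryFailed == other.retryFailed;
        }
        @Override public int hashCode() { return Objects.hash(dryRun, explain, retryFailed); }
    }

    public static void main(String[] args) {
        MiniRequest sent = new MiniRequest(true, false, true);
        // stand-in for writeTo(out) followed by readFrom(in)
        MiniRequest received = new MiniRequest(sent.dryRun, sent.explain, sent.retryFailed);
        if (!sent.equals(received)) throw new AssertionError("round trip changed the request");
        System.out.println("round trip ok");
    }
}
--------------------------------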

@@ -22,13 +22,12 @@ package org.elasticsearch.action.admin.cluster.reroute;
 import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
 import org.elasticsearch.client.ElasticsearchClient;
-import org.elasticsearch.cluster.routing.allocation.command.AllocationCommand;
-import org.elasticsearch.common.bytes.BytesReference;

 /**
  * Builder for a cluster reroute request
  */
-public class ClusterRerouteRequestBuilder extends AcknowledgedRequestBuilder<ClusterRerouteRequest, ClusterRerouteResponse, ClusterRerouteRequestBuilder> {
-
+public class ClusterRerouteRequestBuilder
+        extends AcknowledgedRequestBuilder<ClusterRerouteRequest, ClusterRerouteResponse, ClusterRerouteRequestBuilder> {
     public ClusterRerouteRequestBuilder(ElasticsearchClient client, ClusterRerouteAction action) {
         super(client, action, new ClusterRerouteRequest());
     }

@@ -61,10 +60,11 @@ public class ClusterRerouteRequestBuilder extends AcknowledgedRequestBuilder<ClusterRerouteRequest, ClusterRerouteResponse, ClusterRerouteRequestBuilder> {
     }

     /**
-     * Sets the commands for the request to execute.
+     * Sets the retry failed flag (defaults to <tt>false</tt>). If true, the
+     * request will retry allocating shards that can't currently be allocated due to too many allocation failures.
      */
-    public ClusterRerouteRequestBuilder setCommands(AllocationCommand... commands) throws Exception {
-        request.commands(commands);
+    public ClusterRerouteRequestBuilder setRetryFailed(boolean retryFailed) {
+        request.setRetryFailed(retryFailed);
         return this;
     }
 }

@@ -33,6 +33,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingExplanations;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;

@@ -68,38 +69,55 @@ public class TransportClusterRerouteAction extends TransportMasterNodeAction<ClusterRerouteRequest, ClusterRerouteResponse> {

     @Override
     protected void masterOperation(final ClusterRerouteRequest request, final ClusterState state, final ActionListener<ClusterRerouteResponse> listener) {
-        clusterService.submitStateUpdateTask("cluster_reroute (api)", new AckedClusterStateUpdateTask<ClusterRerouteResponse>(Priority.IMMEDIATE, request, listener) {
-
-            private volatile ClusterState clusterStateToSend;
-            private volatile RoutingExplanations explanations;
-
-            @Override
-            protected ClusterRerouteResponse newResponse(boolean acknowledged) {
-                return new ClusterRerouteResponse(acknowledged, clusterStateToSend, explanations);
-            }
-
-            @Override
-            public void onAckTimeout() {
-                listener.onResponse(new ClusterRerouteResponse(false, clusterStateToSend, new RoutingExplanations()));
-            }
-
-            @Override
-            public void onFailure(String source, Throwable t) {
-                logger.debug("failed to perform [{}]", t, source);
-                super.onFailure(source, t);
-            }
-
-            @Override
-            public ClusterState execute(ClusterState currentState) {
-                RoutingAllocation.Result routingResult = allocationService.reroute(currentState, request.commands, request.explain());
-                ClusterState newState = ClusterState.builder(currentState).routingResult(routingResult).build();
-                clusterStateToSend = newState;
-                explanations = routingResult.explanations();
-                if (request.dryRun) {
-                    return currentState;
-                }
-                return newState;
-            }
-        });
+        clusterService.submitStateUpdateTask("cluster_reroute (api)", new ClusterRerouteResponseAckedClusterStateUpdateTask(logger,
+            allocationService, request, listener));
     }
+
+    static class ClusterRerouteResponseAckedClusterStateUpdateTask extends AckedClusterStateUpdateTask<ClusterRerouteResponse> {
+
+        private final ClusterRerouteRequest request;
+        private final ActionListener<ClusterRerouteResponse> listener;
+        private final ESLogger logger;
+        private final AllocationService allocationService;
+        private volatile ClusterState clusterStateToSend;
+        private volatile RoutingExplanations explanations;
+
+        ClusterRerouteResponseAckedClusterStateUpdateTask(ESLogger logger, AllocationService allocationService, ClusterRerouteRequest request,
+                                                          ActionListener<ClusterRerouteResponse> listener) {
+            super(Priority.IMMEDIATE, request, listener);
+            this.request = request;
+            this.listener = listener;
+            this.logger = logger;
+            this.allocationService = allocationService;
+        }
+
+        @Override
+        protected ClusterRerouteResponse newResponse(boolean acknowledged) {
+            return new ClusterRerouteResponse(acknowledged, clusterStateToSend, explanations);
+        }
+
+        @Override
+        public void onAckTimeout() {
+            listener.onResponse(new ClusterRerouteResponse(false, clusterStateToSend, new RoutingExplanations()));
+        }
+
+        @Override
+        public void onFailure(String source, Throwable t) {
+            logger.debug("failed to perform [{}]", t, source);
+            super.onFailure(source, t);
+        }
+
+        @Override
+        public ClusterState execute(ClusterState currentState) {
+            RoutingAllocation.Result routingResult = allocationService.reroute(currentState, request.getCommands(), request.explain(),
+                request.isRetryFailed());
+            ClusterState newState = ClusterState.builder(currentState).routingResult(routingResult).build();
+            clusterStateToSend = newState;
+            explanations = routingResult.explanations();
+            if (request.dryRun()) {
+                return currentState;
+            }
+            return newState;
+        }
+    }
 }
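
The refactor hoists the anonymous AckedClusterStateUpdateTask into a named static class that receives its collaborators through the constructor, so the task can be built and exercised without a running transport action. The shape of that change in miniature (generic illustration, not the Elasticsearch types):

--------------------------------
public class HoistedCallback {
    interface Task { String execute(String currentState); }

    // Before: new Task() { ... } inline at the submit site, capturing outer state.
    // After: a named class taking what it needs through the constructor.
    static class DryRunAwareTask implements Task {
        private final boolean dryRun;
        DryRunAwareTask(boolean dryRun) { this.dryRun = dryRun; }
        @Override public String execute(String currentState) {
            String newState = currentState + "+rerouted";
            return dryRun ? currentState : newState; // dry run keeps the old state
        }
    }

    public static void main(String[] args) {
        System.out.println(new DryRunAwareTask(true).execute("s0"));  // s0
        System.out.println(new DryRunAwareTask(false).execute("s0")); // s0+rerouted
    }
}
--------------------------------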

@@ -81,18 +81,13 @@ public class CreateSnapshotResponse extends ActionResponse implements ToXContent {
         return snapshotInfo.status();
     }

-    static final class Fields {
-        static final String SNAPSHOT = "snapshot";
-        static final String ACCEPTED = "accepted";
-    }
-
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         if (snapshotInfo != null) {
-            builder.field(Fields.SNAPSHOT);
-            snapshotInfo.toExternalXContent(builder, params);
+            builder.field("snapshot");
+            snapshotInfo.toXContent(builder, params);
         } else {
-            builder.field(Fields.ACCEPTED, true);
+            builder.field("accepted", true);
         }
         return builder;
     }

@@ -74,15 +74,11 @@ public class GetSnapshotsResponse extends ActionResponse implements ToXContent {
         }
     }

-    static final class Fields {
-        static final String SNAPSHOTS = "snapshots";
-    }
-
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
-        builder.startArray(Fields.SNAPSHOTS);
+        builder.startArray("snapshots");
         for (SnapshotInfo snapshotInfo : snapshots) {
-            snapshotInfo.toExternalXContent(builder, params);
+            snapshotInfo.toXContent(builder, params);
         }
         builder.endArray();
         return builder;

@@ -73,18 +73,13 @@ public class RestoreSnapshotResponse extends ActionResponse implements ToXContent {
         return restoreInfo.status();
     }

-    static final class Fields {
-        static final String SNAPSHOT = "snapshot";
-        static final String ACCEPTED = "accepted";
-    }
-
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
         if (restoreInfo != null) {
-            builder.field(Fields.SNAPSHOT);
+            builder.field("snapshot");
             restoreInfo.toXContent(builder, params);
         } else {
-            builder.field(Fields.ACCEPTED, true);
+            builder.field("accepted", true);
         }
         return builder;
     }
@@ -73,13 +73,9 @@ public class SnapshotsStatusResponse extends ActionResponse implements ToXConten
        }
    }

    static final class Fields {
        static final String SNAPSHOTS = "snapshots";
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startArray(Fields.SNAPSHOTS);
        builder.startArray("snapshots");
        for (SnapshotStatus snapshot : snapshots) {
            snapshot.toXContent(builder, params);
        }

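All four snapshot responses above drop their per-class `Fields` constant holders in favor of inline string literals. A sketch of the resulting output shape, written with Jackson as a stand-in for Elasticsearch's XContentBuilder (an assumption for the sake of a self-contained example; the payload fields are illustrative):

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
import java.io.IOException;
import java.io.StringWriter;

final class SnapshotResponseJson {
    // Writes {"accepted":true} or {"snapshot":{...}} with inline field names,
    // mirroring the simplified toXContent methods above.
    static String render(boolean accepted) throws IOException {
        StringWriter out = new StringWriter();
        try (JsonGenerator gen = new JsonFactory().createGenerator(out)) {
            gen.writeStartObject();
            if (accepted) {
                gen.writeBooleanField("accepted", true);
            } else {
                gen.writeFieldName("snapshot");
                gen.writeStartObject();
                gen.writeStringField("state", "SUCCESS"); // illustrative payload, not the real schema
                gen.writeEndObject();
            }
            gen.writeEndObject();
        }
        return out.toString();
    }
}
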
@@ -27,7 +27,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.cache.query.QueryCacheStats;
import org.elasticsearch.index.engine.SegmentsStats;
import org.elasticsearch.index.fielddata.FieldDataStats;
import org.elasticsearch.index.percolator.PercolatorQueryCacheStats;
import org.elasticsearch.index.shard.DocsStats;
import org.elasticsearch.index.store.StoreStats;
import org.elasticsearch.search.suggest.completion.CompletionStats;

@@ -45,7 +44,6 @@ public class ClusterStatsIndices implements ToXContent {
    private QueryCacheStats queryCache;
    private CompletionStats completion;
    private SegmentsStats segments;
    private PercolatorQueryCacheStats percolatorCache;

    public ClusterStatsIndices(List<ClusterStatsNodeResponse> nodeResponses) {
        ObjectObjectHashMap<String, ShardStats> countsPerIndex = new ObjectObjectHashMap<>();

@@ -56,7 +54,6 @@ public class ClusterStatsIndices implements ToXContent {
        this.queryCache = new QueryCacheStats();
        this.completion = new CompletionStats();
        this.segments = new SegmentsStats();
        this.percolatorCache = new PercolatorQueryCacheStats();

        for (ClusterStatsNodeResponse r : nodeResponses) {
            for (org.elasticsearch.action.admin.indices.stats.ShardStats shardStats : r.shardsStats()) {

@@ -79,7 +76,6 @@ public class ClusterStatsIndices implements ToXContent {
                queryCache.add(shardCommonStats.queryCache);
                completion.add(shardCommonStats.completion);
                segments.add(shardCommonStats.segments);
                percolatorCache.add(shardCommonStats.percolatorCache);
            }
        }

@@ -122,10 +118,6 @@ public class ClusterStatsIndices implements ToXContent {
        return segments;
    }

    public PercolatorQueryCacheStats getPercolatorCache() {
        return percolatorCache;
    }

    static final class Fields {
        static final String COUNT = "count";
    }

@@ -140,7 +132,6 @@ public class ClusterStatsIndices implements ToXContent {
        queryCache.toXContent(builder, params);
        completion.toXContent(builder, params);
        segments.toXContent(builder, params);
        percolatorCache.toXContent(builder, params);
        return builder;
    }

@@ -55,8 +55,7 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta
        TransportClusterStatsAction.ClusterStatsNodeRequest, ClusterStatsNodeResponse> {

    private static final CommonStatsFlags SHARD_STATS_FLAGS = new CommonStatsFlags(CommonStatsFlags.Flag.Docs, CommonStatsFlags.Flag.Store,
        CommonStatsFlags.Flag.FieldData, CommonStatsFlags.Flag.QueryCache, CommonStatsFlags.Flag.Completion, CommonStatsFlags.Flag.Segments,
        CommonStatsFlags.Flag.PercolatorCache);
        CommonStatsFlags.Flag.FieldData, CommonStatsFlags.Flag.QueryCache, CommonStatsFlags.Flag.Completion, CommonStatsFlags.Flag.Segments);

    private final NodeService nodeService;
    private final IndicesService indicesService;

@@ -100,7 +99,7 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta
        for (IndexShard indexShard : indexService) {
            if (indexShard.routingEntry() != null && indexShard.routingEntry().active()) {
                // only report on fully started shards
                shardsStats.add(new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexService.cache().getPercolatorQueryCache(), indexShard, SHARD_STATS_FLAGS), indexShard.commitStats()));
                shardsStats.add(new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexShard, SHARD_STATS_FLAGS), indexShard.commitStats()));
            }
        }
    }

@@ -32,10 +32,8 @@ import org.elasticsearch.index.engine.SegmentsStats;
import org.elasticsearch.index.fielddata.FieldDataStats;
import org.elasticsearch.index.flush.FlushStats;
import org.elasticsearch.index.get.GetStats;
import org.elasticsearch.index.percolator.PercolatorQueryCache;
import org.elasticsearch.index.shard.IndexingStats;
import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.percolator.PercolatorQueryCacheStats;
import org.elasticsearch.index.recovery.RecoveryStats;
import org.elasticsearch.index.refresh.RefreshStats;
import org.elasticsearch.index.search.stats.SearchStats;

@@ -101,9 +99,6 @@ public class CommonStats implements Streamable, ToXContent {
            case Segments:
                segments = new SegmentsStats();
                break;
            case PercolatorCache:
                percolatorCache = new PercolatorQueryCacheStats();
                break;
            case Translog:
                translog = new TranslogStats();
                break;

@@ -123,8 +118,7 @@ public class CommonStats implements Streamable, ToXContent {
    }

    public CommonStats(IndicesQueryCache indicesQueryCache, PercolatorQueryCache percolatorQueryCache,
                       IndexShard indexShard, CommonStatsFlags flags) {
    public CommonStats(IndicesQueryCache indicesQueryCache, IndexShard indexShard, CommonStatsFlags flags) {

        CommonStatsFlags.Flag[] setFlags = flags.getFlags();

@@ -169,9 +163,6 @@ public class CommonStats implements Streamable, ToXContent {
            case Segments:
                segments = indexShard.segmentStats(flags.includeSegmentFileSizes());
                break;
            case PercolatorCache:
                percolatorCache = percolatorQueryCache.getStats(indexShard.shardId());
                break;
            case Translog:
                translog = indexShard.translogStats();
                break;

@@ -223,9 +214,6 @@ public class CommonStats implements Streamable, ToXContent {
    @Nullable
    public FieldDataStats fieldData;

    @Nullable
    public PercolatorQueryCacheStats percolatorCache;

    @Nullable
    public CompletionStats completion;

@@ -331,14 +319,6 @@ public class CommonStats implements Streamable, ToXContent {
        } else {
            fieldData.add(stats.getFieldData());
        }
        if (percolatorCache == null) {
            if (stats.getPercolatorCache() != null) {
                percolatorCache = new PercolatorQueryCacheStats();
                percolatorCache.add(stats.getPercolatorCache());
            }
        } else {
            percolatorCache.add(stats.getPercolatorCache());
        }
        if (completion == null) {
            if (stats.getCompletion() != null) {
                completion = new CompletionStats();

@@ -436,11 +416,6 @@ public class CommonStats implements Streamable, ToXContent {
        return this.fieldData;
    }

    @Nullable
    public PercolatorQueryCacheStats getPercolatorCache() {
        return percolatorCache;
    }

    @Nullable
    public CompletionStats getCompletion() {
        return completion;

@@ -528,9 +503,6 @@ public class CommonStats implements Streamable, ToXContent {
        if (in.readBoolean()) {
            fieldData = FieldDataStats.readFieldDataStats(in);
        }
        if (in.readBoolean()) {
            percolatorCache = PercolatorQueryCacheStats.readPercolateStats(in);
        }
        if (in.readBoolean()) {
            completion = CompletionStats.readCompletionStats(in);
        }

@@ -610,12 +582,6 @@ public class CommonStats implements Streamable, ToXContent {
            out.writeBoolean(true);
            fieldData.writeTo(out);
        }
        if (percolatorCache == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            percolatorCache.writeTo(out);
        }
        if (completion == null) {
            out.writeBoolean(false);
        } else {

@@ -669,9 +635,6 @@ public class CommonStats implements Streamable, ToXContent {
        if (fieldData != null) {
            fieldData.toXContent(builder, params);
        }
        if (percolatorCache != null) {
            percolatorCache.toXContent(builder, params);
        }
        if (completion != null) {
            completion.toXContent(builder, params);
        }

@@ -240,7 +240,6 @@ public class CommonStatsFlags implements Streamable, Cloneable {
        FieldData("fielddata"),
        Docs("docs"),
        Warmer("warmer"),
        PercolatorCache("percolator_cache"),
        Completion("completion"),
        Segments("segments"),
        Translog("translog"),

@@ -184,15 +184,6 @@ public class IndicesStatsRequest extends BroadcastRequest<IndicesStatsRequest> {
        return flags.isSet(Flag.FieldData);
    }

    public IndicesStatsRequest percolate(boolean percolate) {
        flags.set(Flag.PercolatorCache, percolate);
        return this;
    }

    public boolean percolate() {
        return flags.isSet(Flag.PercolatorCache);
    }

    public IndicesStatsRequest segments(boolean segments) {
        flags.set(Flag.Segments, segments);
        return this;

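The stats request is built around a set of named flags toggled by fluent setter/getter pairs such as `segments(boolean)`/`segments()`; dropping the percolate flag removes one such pair. A sketch of the flag-set pattern with a plain EnumSet (`StatsRequest` is a hypothetical stand-in, not the Elasticsearch class):

import java.util.EnumSet;

final class StatsRequest {
    enum Flag { DOCS, STORE, FIELD_DATA, QUERY_CACHE, COMPLETION, SEGMENTS }

    private final EnumSet<Flag> flags = EnumSet.noneOf(Flag.class);

    // fluent setter, mirroring IndicesStatsRequest#segments(boolean) above
    StatsRequest segments(boolean enabled) {
        if (enabled) flags.add(Flag.SEGMENTS); else flags.remove(Flag.SEGMENTS);
        return this;
    }

    // getter, mirroring IndicesStatsRequest#segments()
    boolean segments() {
        return flags.contains(Flag.SEGMENTS);
    }
}
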
@@ -127,11 +127,6 @@ public class IndicesStatsRequestBuilder extends BroadcastOperationRequestBuilder
        return this;
    }

    public IndicesStatsRequestBuilder setPercolate(boolean percolate) {
        request.percolate(percolate);
        return this;
    }

    public IndicesStatsRequestBuilder setSegments(boolean segments) {
        request.segments(segments);
        return this;

@@ -139,9 +139,6 @@ public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction<
            flags.set(CommonStatsFlags.Flag.FieldData);
            flags.fieldDataFields(request.fieldDataFields());
        }
        if (request.percolate()) {
            flags.set(CommonStatsFlags.Flag.PercolatorCache);
        }
        if (request.segments()) {
            flags.set(CommonStatsFlags.Flag.Segments);
            flags.includeSegmentFileSizes(request.includeSegmentFileSizes());

@@ -163,6 +160,6 @@ public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction<
            flags.set(CommonStatsFlags.Flag.Recovery);
        }

        return new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexService.cache().getPercolatorQueryCache(), indexShard, flags), indexShard.commitStats());
        return new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexShard, flags), indexShard.commitStats());
    }
}

@@ -19,8 +19,6 @@

package org.elasticsearch.action.search;

import java.util.Map;

/**
 *
 */

@@ -36,13 +34,10 @@ class ParsedScrollId {

    private final ScrollIdForNode[] context;

    private final Map<String, String> attributes;

    public ParsedScrollId(String source, String type, ScrollIdForNode[] context, Map<String, String> attributes) {
    public ParsedScrollId(String source, String type, ScrollIdForNode[] context) {
        this.source = source;
        this.type = type;
        this.context = context;
        this.attributes = attributes;
    }

    public String getSource() {

@@ -56,8 +51,4 @@ class ParsedScrollId {
    public ScrollIdForNode[] getContext() {
        return context;
    }

    public Map<String, String> getAttributes() {
        return this.attributes;
    }
}

@@ -123,7 +123,7 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<DfsSea
            queryFetchResults);
        String scrollId = null;
        if (request.scroll() != null) {
            scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
            scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults);
        }
        listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(),
            buildTookInMillis(), buildShardFailures()));

@@ -200,7 +200,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSe
            fetchResults);
        String scrollId = null;
        if (request.scroll() != null) {
            scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
            scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults);
        }
        listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(),
            buildTookInMillis(), buildShardFailures()));

@@ -66,7 +66,7 @@ class SearchQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<QueryFetc
            firstResults);
        String scrollId = null;
        if (request.scroll() != null) {
            scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
            scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults);
        }
        listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(),
            buildTookInMillis(), buildShardFailures()));

@@ -133,7 +133,7 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<QuerySea
            fetchResults);
        String scrollId = null;
        if (request.scroll() != null) {
            scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
            scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults);
        }
        listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps,
            successfulOps.get(), buildTookInMillis(), buildShardFailures()));

@@ -19,21 +19,16 @@

package org.elasticsearch.action.search;

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CharsRefBuilder;
import org.apache.lucene.store.ByteArrayDataInput;
import org.apache.lucene.store.RAMOutputStream;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Base64;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.internal.InternalScrollSearchRequest;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import static java.util.Collections.emptyMap;
import java.util.Base64;

/**
 *

@@ -49,79 +44,49 @@ final class TransportSearchHelper {
        return new InternalScrollSearchRequest(request, id);
    }

    static String buildScrollId(SearchType searchType, AtomicArray<? extends SearchPhaseResult> searchPhaseResults,
                                @Nullable Map<String, String> attributes) throws IOException {
    static String buildScrollId(SearchType searchType, AtomicArray<? extends SearchPhaseResult> searchPhaseResults) throws IOException {
        if (searchType == SearchType.DFS_QUERY_THEN_FETCH || searchType == SearchType.QUERY_THEN_FETCH) {
            return buildScrollId(ParsedScrollId.QUERY_THEN_FETCH_TYPE, searchPhaseResults, attributes);
            return buildScrollId(ParsedScrollId.QUERY_THEN_FETCH_TYPE, searchPhaseResults);
        } else if (searchType == SearchType.QUERY_AND_FETCH || searchType == SearchType.DFS_QUERY_AND_FETCH) {
            return buildScrollId(ParsedScrollId.QUERY_AND_FETCH_TYPE, searchPhaseResults, attributes);
            return buildScrollId(ParsedScrollId.QUERY_AND_FETCH_TYPE, searchPhaseResults);
        } else {
            throw new IllegalStateException("search_type [" + searchType + "] not supported");
        }
    }

    static String buildScrollId(String type, AtomicArray<? extends SearchPhaseResult> searchPhaseResults,
                                @Nullable Map<String, String> attributes) throws IOException {
        StringBuilder sb = new StringBuilder().append(type).append(';');
        sb.append(searchPhaseResults.asList().size()).append(';');
        for (AtomicArray.Entry<? extends SearchPhaseResult> entry : searchPhaseResults.asList()) {
            SearchPhaseResult searchPhaseResult = entry.value;
            sb.append(searchPhaseResult.id()).append(':').append(searchPhaseResult.shardTarget().nodeId()).append(';');
        }
        if (attributes == null) {
            sb.append("0;");
        } else {
            sb.append(attributes.size()).append(";");
            for (Map.Entry<String, String> entry : attributes.entrySet()) {
                sb.append(entry.getKey()).append(':').append(entry.getValue()).append(';');
            }
    static String buildScrollId(String type, AtomicArray<? extends SearchPhaseResult> searchPhaseResults) throws IOException {
        try (RAMOutputStream out = new RAMOutputStream()) {
            out.writeString(type);
            out.writeVInt(searchPhaseResults.asList().size());
            for (AtomicArray.Entry<? extends SearchPhaseResult> entry : searchPhaseResults.asList()) {
                SearchPhaseResult searchPhaseResult = entry.value;
                out.writeLong(searchPhaseResult.id());
                out.writeString(searchPhaseResult.shardTarget().nodeId());
            }
            byte[] bytes = new byte[(int) out.getFilePointer()];
            out.writeTo(bytes, 0);
            return Base64.getUrlEncoder().encodeToString(bytes);
        }
        BytesRef bytesRef = new BytesRef(sb);
        return Base64.encodeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length, Base64.URL_SAFE);
    }

    static ParsedScrollId parseScrollId(String scrollId) {
        CharsRefBuilder spare = new CharsRefBuilder();
        try {
            byte[] decode = Base64.decode(scrollId, Base64.URL_SAFE);
            spare.copyUTF8Bytes(decode, 0, decode.length);
            byte[] bytes = Base64.getUrlDecoder().decode(scrollId);
            ByteArrayDataInput in = new ByteArrayDataInput(bytes);
            String type = in.readString();
            ScrollIdForNode[] context = new ScrollIdForNode[in.readVInt()];
            for (int i = 0; i < context.length; ++i) {
                long id = in.readLong();
                String target = in.readString();
                context[i] = new ScrollIdForNode(target, id);
            }
            if (in.getPosition() != bytes.length) {
                throw new IllegalArgumentException("Not all bytes were read");
            }
            return new ParsedScrollId(scrollId, type, context);
        } catch (Exception e) {
            throw new IllegalArgumentException("Failed to decode scrollId", e);
            throw new IllegalArgumentException("Cannot parse scroll id", e);
        }
        String[] elements = spare.get().toString().split(";");
        if (elements.length < 2) {
            throw new IllegalArgumentException("Malformed scrollId [" + scrollId + "]");
        }

        int index = 0;
        String type = elements[index++];
        int contextSize = Integer.parseInt(elements[index++]);
        if (elements.length < contextSize + 2) {
            throw new IllegalArgumentException("Malformed scrollId [" + scrollId + "]");
        }

        ScrollIdForNode[] context = new ScrollIdForNode[contextSize];
        for (int i = 0; i < contextSize; i++) {
            String element = elements[index++];
            int sep = element.indexOf(':');
            if (sep == -1) {
                throw new IllegalArgumentException("Malformed scrollId [" + scrollId + "]");
            }
            context[i] = new ScrollIdForNode(element.substring(sep + 1), Long.parseLong(element.substring(0, sep)));
        }
        Map<String, String> attributes;
        int attributesSize = Integer.parseInt(elements[index++]);
        if (attributesSize == 0) {
            attributes = emptyMap();
        } else {
            attributes = new HashMap<>(attributesSize);
            for (int i = 0; i < attributesSize; i++) {
                String element = elements[index++];
                int sep = element.indexOf(':');
                attributes.put(element.substring(0, sep), element.substring(sep + 1));
            }
        }
        return new ParsedScrollId(scrollId, type, context, attributes);
    }

    private TransportSearchHelper() {

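The new scroll id is a URL-safe Base64 blob laid out as (type, shard count, then a context id and node id per shard), replacing the old `;`-separated string with its unused attributes tail. A runnable sketch of the same layout using java.util.Base64 with DataOutputStream/DataInputStream standing in for Lucene's RAMOutputStream/ByteArrayDataInput (my substitution; the real code also uses a variable-length int encoding, which this sketch drops):

import java.io.*;
import java.util.Base64;

final class ScrollIdCodec {
    // Encode: type, entry count, then (contextId, nodeId) per shard.
    static String encode(String type, long[] contextIds, String[] nodeIds) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
            out.writeUTF(type);
            out.writeInt(contextIds.length);
            for (int i = 0; i < contextIds.length; i++) {
                out.writeLong(contextIds[i]);
                out.writeUTF(nodeIds[i]);
            }
        }
        return Base64.getUrlEncoder().encodeToString(bytes.toByteArray());
    }

    // Decode and verify the whole payload was consumed, like parseScrollId above.
    static void decode(String scrollId) throws IOException {
        byte[] raw = Base64.getUrlDecoder().decode(scrollId);
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(raw))) {
            String type = in.readUTF();
            int count = in.readInt();
            for (int i = 0; i < count; i++) {
                long id = in.readLong();
                String node = in.readUTF();
                System.out.println(type + ": context " + id + " on node " + node);
            }
            if (in.available() != 0) {
                throw new IllegalArgumentException("Not all bytes were read");
            }
        }
    }
}

The binary format avoids the ambiguity of the old delimiter-based format (node ids or attribute values containing `:` or `;` would have corrupted it) and makes trailing-garbage detection a simple position check.
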
@@ -177,15 +177,7 @@ final class Bootstrap {
        // install SM after natives, shutdown hooks, etc.
        Security.configure(environment, BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING.get(settings));

        // We do not need to reload system properties here as we have already applied them in building the settings and
        // reloading could cause multiple prompts to the user for values if a system property was specified with a prompt
        // placeholder
        Settings nodeSettings = Settings.builder()
            .put(settings)
            .put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING.getKey(), true)
            .build();

        node = new Node(nodeSettings) {
        node = new Node(settings) {
            @Override
            protected void validateNodeBeforeAcceptingRequests(Settings settings, BoundTransportAddress boundTransportAddress) {
                BootstrapCheck.check(settings, boundTransportAddress);

@@ -193,13 +185,13 @@ final class Bootstrap {
        };
    }

    private static Environment initialSettings(boolean foreground, String pidFile) {
    private static Environment initialSettings(boolean foreground, String pidFile, Map<String, String> esSettings) {
        Terminal terminal = foreground ? Terminal.DEFAULT : null;
        Settings.Builder builder = Settings.builder();
        if (Strings.hasLength(pidFile)) {
            builder.put(Environment.PIDFILE_SETTING.getKey(), pidFile);
        }
        return InternalSettingsPreparer.prepareEnvironment(builder.build(), terminal);
        return InternalSettingsPreparer.prepareEnvironment(builder.build(), terminal, esSettings);
    }

    private void start() {

@@ -233,11 +225,13 @@ final class Bootstrap {
        // Set the system property before anything has a chance to trigger its use
        initLoggerPrefix();

        elasticsearchSettings(esSettings);
        // force the class initializer for BootstrapInfo to run before
        // the security manager is installed
        BootstrapInfo.init();

        INSTANCE = new Bootstrap();

        Environment environment = initialSettings(foreground, pidFile);
        Environment environment = initialSettings(foreground, pidFile, esSettings);
        Settings settings = environment.settings();
        LogConfigurator.configure(settings, true);
        checkForCustomConfFile();

@@ -295,13 +289,6 @@ final class Bootstrap {
        }
    }

    @SuppressForbidden(reason = "Sets system properties passed as CLI parameters")
    private static void elasticsearchSettings(Map<String, String> esSettings) {
        for (Map.Entry<String, String> esSetting : esSettings.entrySet()) {
            System.setProperty(esSetting.getKey(), esSetting.getValue());
        }
    }

    @SuppressForbidden(reason = "System#out")
    private static void closeSystOut() {
        System.out.close();

@@ -120,4 +120,8 @@ public final class BootstrapInfo {
        }
        return SYSTEM_PROPERTIES;
    }

    public static void init() {
    }

}

@@ -21,28 +21,25 @@ package org.elasticsearch.bootstrap;

import joptsimple.OptionSet;
import joptsimple.OptionSpec;
import joptsimple.util.KeyValuePair;
import org.elasticsearch.Build;
import org.elasticsearch.cli.Command;
import org.elasticsearch.cli.ExitCodes;
import org.elasticsearch.cli.SettingCommand;
import org.elasticsearch.cli.Terminal;
import org.elasticsearch.cli.UserError;
import org.elasticsearch.monitor.jvm.JvmInfo;

import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

/**
 * This class starts elasticsearch.
 */
class Elasticsearch extends Command {
class Elasticsearch extends SettingCommand {

    private final OptionSpec<Void> versionOption;
    private final OptionSpec<Void> daemonizeOption;
    private final OptionSpec<String> pidfileOption;
    private final OptionSpec<KeyValuePair> propertyOption;

    // visible for testing
    Elasticsearch() {

@@ -56,7 +53,6 @@ class Elasticsearch extends Command {
        pidfileOption = parser.acceptsAll(Arrays.asList("p", "pidfile"),
            "Creates a pid file in the specified path on start")
            .withRequiredArg();
        propertyOption = parser.accepts("E", "Configure an Elasticsearch setting").withRequiredArg().ofType(KeyValuePair.class);
    }

    /**

@@ -75,7 +71,7 @@ class Elasticsearch extends Command {
    }

    @Override
    protected void execute(Terminal terminal, OptionSet options) throws Exception {
    protected void execute(Terminal terminal, OptionSet options, Map<String, String> settings) throws Exception {
        if (options.nonOptionArguments().isEmpty() == false) {
            throw new UserError(ExitCodes.USAGE, "Positional arguments not allowed, found " + options.nonOptionArguments());
        }

@@ -84,26 +80,15 @@ class Elasticsearch extends Command {
            throw new UserError(ExitCodes.USAGE, "Elasticsearch version option is mutually exclusive with any other option");
        }
        terminal.println("Version: " + org.elasticsearch.Version.CURRENT
            + ", Build: " + Build.CURRENT.shortHash() + "/" + Build.CURRENT.date()
            + ", JVM: " + JvmInfo.jvmInfo().version());
            + ", Build: " + Build.CURRENT.shortHash() + "/" + Build.CURRENT.date()
            + ", JVM: " + JvmInfo.jvmInfo().version());
            return;
        }

        final boolean daemonize = options.has(daemonizeOption);
        final String pidFile = pidfileOption.value(options);

        final Map<String, String> esSettings = new HashMap<>();
        for (final KeyValuePair kvp : propertyOption.values(options)) {
            if (!kvp.key.startsWith("es.")) {
                throw new UserError(ExitCodes.USAGE, "Elasticsearch settings must be prefixed with [es.] but was [" + kvp.key + "]");
            }
            if (kvp.value.isEmpty()) {
                throw new UserError(ExitCodes.USAGE, "Elasticsearch setting [" + kvp.key + "] must not be empty");
            }
            esSettings.put(kvp.key, kvp.value);
        }

        init(daemonize, pidFile, esSettings);
        init(daemonize, pidFile, settings);
    }

    void init(final boolean daemonize, final String pidFile, final Map<String, String> esSettings) {

@@ -19,15 +19,15 @@

package org.elasticsearch.cli;

import java.io.IOException;
import java.util.Arrays;

import joptsimple.OptionException;
import joptsimple.OptionParser;
import joptsimple.OptionSet;
import joptsimple.OptionSpec;
import org.elasticsearch.common.SuppressForbidden;

import java.io.IOException;
import java.util.Arrays;

/**
 * An action to execute within a cli.
 */

@@ -112,4 +112,5 @@ public abstract class Command {
     *
     * Any runtime user errors (like an input file that does not exist), should throw a {@link UserError}. */
    protected abstract void execute(Terminal terminal, OptionSet options) throws Exception;

}

@@ -0,0 +1,77 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.cli;

import joptsimple.OptionSet;
import joptsimple.OptionSpec;
import joptsimple.util.KeyValuePair;

import java.util.HashMap;
import java.util.Locale;
import java.util.Map;

public abstract class SettingCommand extends Command {

    private final OptionSpec<KeyValuePair> settingOption;

    public SettingCommand(String description) {
        super(description);
        this.settingOption = parser.accepts("E", "Configure a setting").withRequiredArg().ofType(KeyValuePair.class);
    }

    @Override
    protected void execute(Terminal terminal, OptionSet options) throws Exception {
        final Map<String, String> settings = new HashMap<>();
        for (final KeyValuePair kvp : settingOption.values(options)) {
            if (kvp.value.isEmpty()) {
                throw new UserError(ExitCodes.USAGE, "Setting [" + kvp.key + "] must not be empty");
            }
            settings.put(kvp.key, kvp.value);
        }

        putSystemPropertyIfSettingIsMissing(settings, "path.conf", "es.path.conf");
        putSystemPropertyIfSettingIsMissing(settings, "path.data", "es.path.data");
        putSystemPropertyIfSettingIsMissing(settings, "path.home", "es.path.home");
        putSystemPropertyIfSettingIsMissing(settings, "path.logs", "es.path.logs");

        execute(terminal, options, settings);
    }

    protected static void putSystemPropertyIfSettingIsMissing(final Map<String, String> settings, final String setting, final String key) {
        final String value = System.getProperty(key);
        if (value != null) {
            if (settings.containsKey(setting)) {
                final String message =
                    String.format(
                        Locale.ROOT,
                        "duplicate setting [%s] found via command-line [%s] and system property [%s]",
                        setting,
                        settings.get(setting),
                        value);
                throw new IllegalArgumentException(message);
            } else {
                settings.put(setting, value);
            }
        }
    }

    protected abstract void execute(Terminal terminal, OptionSet options, Map<String, String> settings) throws Exception;

}

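The new `SettingCommand` collects repeated `-E key=value` options into a map and falls back to the legacy `es.path.*` system properties only when a setting was not already given on the command line, failing loudly when both sources supply the same setting. A plain-Java sketch of that precedence rule without the jopt-simple dependency (the paths and property names below are illustrative):

import java.util.HashMap;
import java.util.Map;

final class SettingPrecedence {
    // Copy a system property into the settings map unless the CLI already set it;
    // a value arriving from both sources at once is treated as a user error.
    static void putSystemPropertyIfSettingIsMissing(Map<String, String> settings, String setting, String property) {
        String value = System.getProperty(property);
        if (value == null) {
            return;
        }
        if (settings.containsKey(setting)) {
            throw new IllegalArgumentException(
                "duplicate setting [" + setting + "] found via command-line and system property [" + property + "]");
        }
        settings.put(setting, value);
    }

    public static void main(String[] args) {
        Map<String, String> settings = new HashMap<>();
        settings.put("path.home", "/opt/elasticsearch");    // as if passed via -E path.home=...
        System.setProperty("es.path.data", "/var/lib/es");  // legacy system-property fallback
        putSystemPropertyIfSettingIsMissing(settings, "path.data", "es.path.data");
        System.out.println(settings); // path.home from the CLI, path.data from the property
    }
}
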
@@ -42,12 +42,6 @@ import org.elasticsearch.action.get.MultiGetResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.percolate.MultiPercolateRequest;
import org.elasticsearch.action.percolate.MultiPercolateRequestBuilder;
import org.elasticsearch.action.percolate.MultiPercolateResponse;
import org.elasticsearch.action.percolate.PercolateRequest;
import org.elasticsearch.action.percolate.PercolateRequestBuilder;
import org.elasticsearch.action.percolate.PercolateResponse;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.ClearScrollRequestBuilder;
import org.elasticsearch.action.search.ClearScrollResponse;

@@ -419,36 +413,6 @@ public interface Client extends ElasticsearchClient, Releasable {
     */
    MultiTermVectorsRequestBuilder prepareMultiTermVectors();

    /**
     * Percolates a request returning the matches documents.
     */
    ActionFuture<PercolateResponse> percolate(PercolateRequest request);

    /**
     * Percolates a request returning the matches documents.
     */
    void percolate(PercolateRequest request, ActionListener<PercolateResponse> listener);

    /**
     * Percolates a request returning the matches documents.
     */
    PercolateRequestBuilder preparePercolate();

    /**
     * Performs multiple percolate requests.
     */
    ActionFuture<MultiPercolateResponse> multiPercolate(MultiPercolateRequest request);

    /**
     * Performs multiple percolate requests.
     */
    void multiPercolate(MultiPercolateRequest request, ActionListener<MultiPercolateResponse> listener);

    /**
     * Performs multiple percolate requests.
     */
    MultiPercolateRequestBuilder prepareMultiPercolate();

    /**
     * Computes a score explanation for the specified request.
     *

@@ -295,14 +295,6 @@ import org.elasticsearch.action.ingest.SimulatePipelineRequest;
import org.elasticsearch.action.ingest.SimulatePipelineRequestBuilder;
import org.elasticsearch.action.ingest.SimulatePipelineResponse;
import org.elasticsearch.action.ingest.WritePipelineResponse;
import org.elasticsearch.action.percolate.MultiPercolateAction;
import org.elasticsearch.action.percolate.MultiPercolateRequest;
import org.elasticsearch.action.percolate.MultiPercolateRequestBuilder;
import org.elasticsearch.action.percolate.MultiPercolateResponse;
import org.elasticsearch.action.percolate.PercolateAction;
import org.elasticsearch.action.percolate.PercolateRequest;
import org.elasticsearch.action.percolate.PercolateRequestBuilder;
import org.elasticsearch.action.percolate.PercolateResponse;
import org.elasticsearch.action.search.ClearScrollAction;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.ClearScrollRequestBuilder;

@@ -623,36 +615,6 @@ public abstract class AbstractClient extends AbstractComponent implements Client
        return new MultiTermVectorsRequestBuilder(this, MultiTermVectorsAction.INSTANCE);
    }

    @Override
    public ActionFuture<PercolateResponse> percolate(final PercolateRequest request) {
        return execute(PercolateAction.INSTANCE, request);
    }

    @Override
    public void percolate(final PercolateRequest request, final ActionListener<PercolateResponse> listener) {
        execute(PercolateAction.INSTANCE, request, listener);
    }

    @Override
    public PercolateRequestBuilder preparePercolate() {
        return new PercolateRequestBuilder(this, PercolateAction.INSTANCE);
    }

    @Override
    public MultiPercolateRequestBuilder prepareMultiPercolate() {
        return new MultiPercolateRequestBuilder(this, MultiPercolateAction.INSTANCE);
    }

    @Override
    public void multiPercolate(MultiPercolateRequest request, ActionListener<MultiPercolateResponse> listener) {
        execute(MultiPercolateAction.INSTANCE, request, listener);
    }

    @Override
    public ActionFuture<MultiPercolateResponse> multiPercolate(MultiPercolateRequest request) {
        return execute(MultiPercolateAction.INSTANCE, request);
    }

    @Override
    public ExplainRequestBuilder prepareExplain(String index, String type, String id) {
        return new ExplainRequestBuilder(this, ExplainAction.INSTANCE, index, type, id);

@@ -49,6 +49,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDeci
import org.elasticsearch.cluster.routing.allocation.decider.NodeVersionAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.RebalanceOnlyWhenActiveAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider;

@@ -79,6 +80,7 @@ public class ClusterModule extends AbstractModule {
        new Setting<>("cluster.routing.allocation.type", BALANCED_ALLOCATOR, Function.identity(), Property.NodeScope);
    public static final List<Class<? extends AllocationDecider>> DEFAULT_ALLOCATION_DECIDERS =
        Collections.unmodifiableList(Arrays.asList(
            MaxRetryAllocationDecider.class,
            SameShardAllocationDecider.class,
            FilterAllocationDecider.class,
            ReplicaAfterPrimaryActiveAllocationDecider.class,

@@ -42,7 +42,6 @@ import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.NodeServicesProvider;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.percolator.PercolatorFieldMapper;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.InvalidTypeNameException;

@@ -22,6 +22,7 @@ package org.elasticsearch.cluster.routing;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

@@ -331,15 +332,13 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {

    public ShardIterator onlyNodeActiveInitializingShardsIt(String nodeId) {
        ArrayList<ShardRouting> ordered = new ArrayList<>(activeShards.size() + allInitializingShards.size());
        // fill it in a randomized fashion
        for (int i = 0; i < activeShards.size(); i++) {
            ShardRouting shardRouting = activeShards.get(i);
        int seed = shuffler.nextSeed();
        for (ShardRouting shardRouting : shuffler.shuffle(activeShards, seed)) {
            if (nodeId.equals(shardRouting.currentNodeId())) {
                ordered.add(shardRouting);
            }
        }
        for (int i = 0; i < allInitializingShards.size(); i++) {
            ShardRouting shardRouting = allInitializingShards.get(i);
        for (ShardRouting shardRouting : shuffler.shuffle(allInitializingShards, seed)) {
            if (nodeId.equals(shardRouting.currentNodeId())) {
                ordered.add(shardRouting);
            }
        }

@@ -347,26 +346,31 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {
        return new PlainShardIterator(shardId, ordered);
    }

    public ShardIterator onlyNodeSelectorActiveInitializingShardsIt(String nodeAttributes, DiscoveryNodes discoveryNodes) {
        return onlyNodeSelectorActiveInitializingShardsIt(new String[] {nodeAttributes}, discoveryNodes);
    }

    /**
     * Returns shards based on nodeAttributes given such as node name , node attribute, node IP
     * Supports node specifications in cluster API
     */
    public ShardIterator onlyNodeSelectorActiveInitializingShardsIt(String nodeAttribute, DiscoveryNodes discoveryNodes) {
    public ShardIterator onlyNodeSelectorActiveInitializingShardsIt(String[] nodeAttributes, DiscoveryNodes discoveryNodes) {
        ArrayList<ShardRouting> ordered = new ArrayList<>(activeShards.size() + allInitializingShards.size());
        Set<String> selectedNodes = Sets.newHashSet(discoveryNodes.resolveNodesIds(nodeAttribute));

        for (ShardRouting shardRouting : activeShards) {
        Set<String> selectedNodes = Sets.newHashSet(discoveryNodes.resolveNodesIds(nodeAttributes));
        int seed = shuffler.nextSeed();
        for (ShardRouting shardRouting : shuffler.shuffle(activeShards, seed)) {
            if (selectedNodes.contains(shardRouting.currentNodeId())) {
                ordered.add(shardRouting);
            }
        }
        for (ShardRouting shardRouting : allInitializingShards) {
        for (ShardRouting shardRouting : shuffler.shuffle(allInitializingShards, seed)) {
            if (selectedNodes.contains(shardRouting.currentNodeId())) {
                ordered.add(shardRouting);
            }
        }
        if (ordered.isEmpty()) {
            throw new IllegalArgumentException("No data node with criteria [" + nodeAttribute + "] found");
            throw new IllegalArgumentException("no data nodes with critera(s) " +
                Strings.arrayToCommaDelimitedString(nodeAttributes) + "] found for shard:" + shardId());
        }
        return new PlainShardIterator(shardId, ordered);
    }

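Both loops now draw a single seed once and shuffle with it, so active and initializing shards are permuted consistently within one iterator while still rotating across requests. A sketch of seed-based shuffling with Collections.shuffle (the `shuffler` in the real class is an Elasticsearch ShardShuffler; this stand-in only shows the idea):

import java.util.*;

final class SeededShuffle {
    private static long nextSeed = 42; // stand-in for a rotating per-request seed

    static <T> List<T> shuffle(List<T> input, long seed) {
        List<T> copy = new ArrayList<>(input);
        Collections.shuffle(copy, new Random(seed)); // same seed => same permutation
        return copy;
    }

    public static void main(String[] args) {
        long seed = nextSeed++;
        List<String> active = Arrays.asList("s0", "s1", "s2");
        List<String> initializing = Arrays.asList("s3", "s4");
        // Reusing one seed keeps the randomization consistent across the two
        // passes, as in onlyNodeSelectorActiveInitializingShardsIt above.
        System.out.println(shuffle(active, seed));
        System.out.println(shuffle(initializing, seed));
    }
}
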
@@ -177,8 +177,8 @@ public class OperationRouting extends AbstractComponent {
                ensureNodeIdExists(nodes, nodeId);
                return indexShard.onlyNodeActiveInitializingShardsIt(nodeId);
            case ONLY_NODES:
                String nodeAttribute = preference.substring(Preference.ONLY_NODES.type().length() + 1);
                return indexShard.onlyNodeSelectorActiveInitializingShardsIt(nodeAttribute, nodes);
                String nodeAttributes = preference.substring(Preference.ONLY_NODES.type().length() + 1);
                return indexShard.onlyNodeSelectorActiveInitializingShardsIt(nodeAttributes.split(","), nodes);
            default:
                throw new IllegalArgumentException("unknown preference [" + preferenceType + "]");
        }

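The `_only_nodes` preference now accepts a comma-separated list of node specifications: the handler strips the prefix plus its separator and splits on commas before resolving the specs. A small sketch of just that parsing step (the `_only_nodes` literal matches the preference syntax; the node specs shown are illustrative):

final class PreferenceParsing {
    public static void main(String[] args) {
        String preference = "_only_nodes:node-1,192.168.0.5,node-2";
        String prefix = "_only_nodes";
        // skip the prefix and the ':' separator, then split the node specifications
        String[] nodeSpecs = preference.substring(prefix.length() + 1).split(",");
        for (String spec : nodeSpecs) {
            System.out.println("resolve nodes matching: " + spec);
        }
    }
}
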
@@ -48,7 +48,6 @@ public final class UnassignedInfo implements ToXContent, Writeable {
    public static final Setting<TimeValue> INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING =
        Setting.timeSetting("index.unassigned.node_left.delayed_timeout", DEFAULT_DELAYED_NODE_LEFT_TIMEOUT, Property.Dynamic,
            Property.IndexScope);

    /**
     * Reason why the shard is in unassigned state.
     * <p>

@@ -103,7 +102,11 @@ public final class UnassignedInfo implements ToXContent, Writeable {
        /**
         * A better replica location is identified and causes the existing replica allocation to be cancelled.
         */
        REALLOCATED_REPLICA;
        REALLOCATED_REPLICA,
        /**
         * Unassigned as a result of a failed primary while the replica was initializing.
         */
        PRIMARY_FAILED;
    }

    private final Reason reason;

@@ -112,6 +115,7 @@ public final class UnassignedInfo implements ToXContent, Writeable {
    private final long lastComputedLeftDelayNanos; // how long to delay shard allocation, not serialized (always positive, 0 means no delay)
    private final String message;
    private final Throwable failure;
    private final int failedAllocations;

    /**
     * creates an UnassingedInfo object based **current** time

@@ -120,7 +124,7 @@ public final class UnassignedInfo implements ToXContent, Writeable {
     * @param message more information about cause.
     **/
    public UnassignedInfo(Reason reason, String message) {
        this(reason, message, null, System.nanoTime(), System.currentTimeMillis());
        this(reason, message, null, reason == Reason.ALLOCATION_FAILED ? 1 : 0, System.nanoTime(), System.currentTimeMillis());
    }

    /**

@@ -130,13 +134,16 @@ public final class UnassignedInfo implements ToXContent, Writeable {
     * @param unassignedTimeNanos the time to use as the base for any delayed re-assignment calculation
     * @param unassignedTimeMillis the time of unassignment used to display to in our reporting.
     */
    public UnassignedInfo(Reason reason, @Nullable String message, @Nullable Throwable failure, long unassignedTimeNanos, long unassignedTimeMillis) {
    public UnassignedInfo(Reason reason, @Nullable String message, @Nullable Throwable failure, int failedAllocations, long unassignedTimeNanos, long unassignedTimeMillis) {
        this.reason = reason;
        this.unassignedTimeMillis = unassignedTimeMillis;
        this.unassignedTimeNanos = unassignedTimeNanos;
        this.lastComputedLeftDelayNanos = 0L;
        this.message = message;
        this.failure = failure;
        this.failedAllocations = failedAllocations;
        assert (failedAllocations > 0) == (reason == Reason.ALLOCATION_FAILED):
            "failedAllocations: " + failedAllocations + " for reason " + reason;
        assert !(message == null && failure != null) : "provide a message if a failure exception is provided";
    }

@@ -147,17 +154,19 @@ public final class UnassignedInfo implements ToXContent, Writeable {
        this.lastComputedLeftDelayNanos = newComputedLeftDelayNanos;
        this.message = unassignedInfo.message;
        this.failure = unassignedInfo.failure;
        this.failedAllocations = unassignedInfo.failedAllocations;
    }

    public UnassignedInfo(StreamInput in) throws IOException {
        this.reason = Reason.values()[(int) in.readByte()];
        this.unassignedTimeMillis = in.readLong();
        // As System.nanoTime() cannot be compared across different JVMs, reset it to now.
        // This means that in master failover situations, elapsed delay time is forgotten.
        // This means that in master fail-over situations, elapsed delay time is forgotten.
        this.unassignedTimeNanos = System.nanoTime();
        this.lastComputedLeftDelayNanos = 0L;
        this.message = in.readOptionalString();
        this.failure = in.readThrowable();
        this.failedAllocations = in.readVInt();
    }

    public void writeTo(StreamOutput out) throws IOException {

@@ -166,12 +175,18 @@ public final class UnassignedInfo implements ToXContent, Writeable {
        // Do not serialize unassignedTimeNanos as System.nanoTime() cannot be compared across different JVMs
        out.writeOptionalString(message);
        out.writeThrowable(failure);
        out.writeVInt(failedAllocations);
    }

    public UnassignedInfo readFrom(StreamInput in) throws IOException {
        return new UnassignedInfo(in);
    }

    /**
     * Returns the number of previously failed allocations of this shard.
     */
    public int getNumFailedAllocations() { return failedAllocations; }

    /**
     * The reason why the shard is unassigned.
     */

@@ -325,7 +340,11 @@ public final class UnassignedInfo implements ToXContent, Writeable {
        StringBuilder sb = new StringBuilder();
        sb.append("[reason=").append(reason).append("]");
        sb.append(", at[").append(DATE_TIME_FORMATTER.printer().print(unassignedTimeMillis)).append("]");
        if (failedAllocations > 0) {
            sb.append(", failed_attempts[").append(failedAllocations).append("]");
        }
        String details = getDetails();

        if (details != null) {
            sb.append(", details[").append(details).append("]");
        }

@@ -342,6 +361,9 @@ public final class UnassignedInfo implements ToXContent, Writeable {
        builder.startObject("unassigned_info");
        builder.field("reason", reason);
        builder.field("at", DATE_TIME_FORMATTER.printer().print(unassignedTimeMillis));
        if (failedAllocations > 0) {
            builder.field("failed_attempts", failedAllocations);
        }
        String details = getDetails();
        if (details != null) {
            builder.field("details", details);

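`UnassignedInfo` now carries a `failedAllocations` counter, serialized as a vInt, with the invariant that a positive count occurs exactly when the reason is ALLOCATION_FAILED. A compact sketch of the invariant and the retry increment (types simplified to keep the example self-contained):

final class UnassignedInfoSketch {
    enum Reason { INDEX_CREATED, NODE_LEFT, ALLOCATION_FAILED, PRIMARY_FAILED }

    final Reason reason;
    final int failedAllocations;

    UnassignedInfoSketch(Reason reason, int failedAllocations) {
        // positive counts are only legal for allocation failures, as asserted above
        assert (failedAllocations > 0) == (reason == Reason.ALLOCATION_FAILED)
            : "failedAllocations: " + failedAllocations + " for reason " + reason;
        this.reason = reason;
        this.failedAllocations = failedAllocations;
    }

    // On another failure the counter grows by one, as in applyFailedShards below
    UnassignedInfoSketch fail() {
        return new UnassignedInfoSketch(Reason.ALLOCATION_FAILED, failedAllocations + 1);
    }
}
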
@@ -222,8 +222,10 @@ public class AllocationService extends AbstractComponent {
        List<FailedRerouteAllocation.FailedShard> orderedFailedShards = new ArrayList<>(failedShards);
        orderedFailedShards.sort(Comparator.comparing(failedShard -> failedShard.shard.primary()));
        for (FailedRerouteAllocation.FailedShard failedShard : orderedFailedShards) {
            UnassignedInfo unassignedInfo = failedShard.shard.unassignedInfo();
            final int failedAllocations = unassignedInfo != null ? unassignedInfo.getNumFailedAllocations() : 0;
            changed |= applyFailedShard(allocation, failedShard.shard, true, new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, failedShard.message, failedShard.failure,
                System.nanoTime(), System.currentTimeMillis()));
                failedAllocations + 1, System.nanoTime(), System.currentTimeMillis()));
        }
        if (!changed) {
            return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());

@@ -257,16 +259,13 @@ public class AllocationService extends AbstractComponent {
            .collect(Collectors.joining(", "));
    }

    public RoutingAllocation.Result reroute(ClusterState clusterState, AllocationCommands commands) {
        return reroute(clusterState, commands, false);
    }

    public RoutingAllocation.Result reroute(ClusterState clusterState, AllocationCommands commands, boolean explain) {
    public RoutingAllocation.Result reroute(ClusterState clusterState, AllocationCommands commands, boolean explain, boolean retryFailed) {
        RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
        // we don't shuffle the unassigned shards here, to try and get as close as possible to
        // a consistent result of the effect the commands have on the routing
        // this allows systems to dry run the commands, see the resulting cluster state, and act on it
        RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState, clusterInfoService.getClusterInfo(), currentNanoTime());
        RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState,
            clusterInfoService.getClusterInfo(), currentNanoTime(), retryFailed);
        // don't short circuit deciders, we want a full explanation
        allocation.debugDecision(true);
        // we ignore disable allocation, because commands are explicit

@@ -305,7 +304,8 @@ public class AllocationService extends AbstractComponent {
        RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
        // shuffle the unassigned nodes, just so we won't have things like poison failed shards
        routingNodes.unassigned().shuffle();
        RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState, clusterInfoService.getClusterInfo(), currentNanoTime());
        RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState,
            clusterInfoService.getClusterInfo(), currentNanoTime(), false);
        allocation.debugDecision(debug);
        if (!reroute(allocation)) {
            return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());

@@ -437,7 +437,7 @@ public class AllocationService extends AbstractComponent {
        // now, go over all the shards routing on the node, and fail them
        for (ShardRouting shardRouting : node.copyShards()) {
            UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.NODE_LEFT, "node_left[" + node.nodeId() + "]", null,
                allocation.getCurrentNanoTime(), System.currentTimeMillis());
                0, allocation.getCurrentNanoTime(), System.currentTimeMillis());
            applyFailedShard(allocation, shardRouting, false, unassignedInfo);
        }
        // its a dead node, remove it, note, its important to remove it *after* we apply failed shard

@@ -457,8 +457,8 @@ public class AllocationService extends AbstractComponent {
        boolean changed = false;
        for (ShardRouting routing : replicas) {
            changed |= applyFailedShard(allocation, routing, false,
                new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, "primary failed while replica initializing",
                    null, allocation.getCurrentNanoTime(), System.currentTimeMillis()));
                new UnassignedInfo(UnassignedInfo.Reason.PRIMARY_FAILED, "primary failed while replica initializing",
                    null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis()));
        }
        return changed;
    }

@@ -58,7 +58,7 @@ public class FailedRerouteAllocation extends RoutingAllocation {
    private final List<FailedShard> failedShards;

    public FailedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, List<FailedShard> failedShards, ClusterInfo clusterInfo) {
        super(deciders, routingNodes, clusterState, clusterInfo, System.nanoTime());
        super(deciders, routingNodes, clusterState, clusterInfo, System.nanoTime(), false);
        this.failedShards = failedShards;
    }

@@ -134,6 +134,8 @@ public class RoutingAllocation {

    private boolean ignoreDisable = false;

    private final boolean retryFailed;

    private boolean debugDecision = false;

    private boolean hasPendingAsyncFetch = false;

@@ -148,7 +150,7 @@ public class RoutingAllocation {
     * @param clusterState cluster state before rerouting
     * @param currentNanoTime the nano time to use for all delay allocation calculation (typically {@link System#nanoTime()})
     */
    public RoutingAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, ClusterInfo clusterInfo, long currentNanoTime) {
    public RoutingAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, ClusterInfo clusterInfo, long currentNanoTime, boolean retryFailed) {
        this.deciders = deciders;
        this.routingNodes = routingNodes;
        this.metaData = clusterState.metaData();

@@ -156,6 +158,7 @@ public class RoutingAllocation {
        this.customs = clusterState.customs();
        this.clusterInfo = clusterInfo;
        this.currentNanoTime = currentNanoTime;
        this.retryFailed = retryFailed;
    }

    /** returns the nano time captured at the beginning of the allocation. used to make sure all time based decisions are aligned */

@@ -297,4 +300,8 @@ public class RoutingAllocation {
    public void setHasPendingAsyncFetch() {
        this.hasPendingAsyncFetch = true;
    }

    public boolean isRetryFailed() {
        return retryFailed;
    }
}

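The `retryFailed` flag rides along on the `RoutingAllocation` so a decider can veto shards that have already failed too often unless the operator explicitly asked to retry. A sketch in the spirit of the `MaxRetryAllocationDecider` registered in ClusterModule above (the threshold of 5 is an assumption standing in for the decider's configurable maximum):

final class MaxRetrySketch {
    static final int MAX_RETRIES = 5; // assumed default; configurable in the real decider

    enum Decision { YES, NO }

    // Deny allocation once a shard has failed too often, unless the reroute was
    // issued with retry_failed, which RoutingAllocation#isRetryFailed reports.
    static Decision canAllocate(int failedAllocations, boolean retryFailed) {
        if (failedAllocations >= MAX_RETRIES && retryFailed == false) {
            return Decision.NO;
        }
        return Decision.YES;
    }

    public static void main(String[] args) {
        System.out.println(canAllocate(5, false)); // NO: give up after repeated failures
        System.out.println(canAllocate(5, true));  // YES: operator forced a retry
    }
}
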
@@ -36,7 +36,7 @@ public class StartedRerouteAllocation extends RoutingAllocation {
    private final List<? extends ShardRouting> startedShards;

    public StartedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, List<? extends ShardRouting> startedShards, ClusterInfo clusterInfo) {
        super(deciders, routingNodes, clusterState, clusterInfo, System.nanoTime());
        super(deciders, routingNodes, clusterState, clusterInfo, System.nanoTime(), false);
        this.startedShards = startedShards;
    }

@@ -38,6 +38,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;
import java.util.Objects;
import java.util.function.Consumer;
import java.util.function.Function;

@@ -228,4 +229,22 @@ public abstract class AbstractAllocateAllocationCommand implements AllocationCom

    protected void extraXContent(XContentBuilder builder) throws IOException {
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        AbstractAllocateAllocationCommand other = (AbstractAllocateAllocationCommand) obj;
        // Override equals and hashCode for testing
        return Objects.equals(index, other.index) &&
            Objects.equals(shardId, other.shardId) &&
            Objects.equals(node, other.node);
    }

    @Override
    public int hashCode() {
        // Override equals and hashCode for testing
        return Objects.hash(index, shardId, node);
    }
}

@@ -125,7 +125,7 @@ public class AllocateEmptyPrimaryAllocationCommand extends BasePrimaryAllocation
             // we need to move the unassigned info back to treat it as if it was index creation
             unassignedInfoToUpdate = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED,
                 "force empty allocation from previous reason " + shardRouting.unassignedInfo().getReason() + ", " + shardRouting.unassignedInfo().getMessage(),
-                shardRouting.unassignedInfo().getFailure(), System.nanoTime(), System.currentTimeMillis());
+                shardRouting.unassignedInfo().getFailure(), 0, System.nanoTime(), System.currentTimeMillis());
         }

         initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting, unassignedInfoToUpdate);
@@ -136,6 +136,4 @@ public class AllocateReplicaAllocationCommand extends AbstractAllocateAllocation
         initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting);
         return new RerouteExplanation(this, decision);
     }
-
-
 }
@@ -22,13 +22,16 @@ package org.elasticsearch.cluster.routing.allocation.command;
 import org.elasticsearch.cluster.routing.allocation.RerouteExplanation;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
 import org.elasticsearch.common.io.stream.NamedWriteable;
+import org.elasticsearch.common.network.NetworkModule;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentParser;

 import java.io.IOException;

 /**
- * This interface defines the basic methods of commands for allocation
+ * A command to move shards in some way.
+ *
+ * Commands are registered in {@link NetworkModule}.
  */
 public interface AllocationCommand extends NamedWriteable, ToXContent {
     interface Parser<T extends AllocationCommand> {
@@ -20,12 +20,12 @@
 package org.elasticsearch.cluster.routing.allocation.command;

 import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.action.support.ToXContentToBytes;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
 import org.elasticsearch.cluster.routing.allocation.RoutingExplanations;
 import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;

@@ -33,12 +33,13 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.Objects;

 /**
  * A simple {@link AllocationCommand} composite managing several
  * {@link AllocationCommand} implementations
  */
-public class AllocationCommands {
+public class AllocationCommands extends ToXContentToBytes {
     private final List<AllocationCommand> commands = new ArrayList<>();

     /**

@@ -171,21 +172,31 @@ public class AllocationCommands {
         return commands;
     }

-    /**
-     * Writes {@link AllocationCommands} to a {@link XContentBuilder}
-     *
-     * @param commands {@link AllocationCommands} to write
-     * @param builder {@link XContentBuilder} to use
-     * @param params Parameters to use for building
-     * @throws IOException if something bad happens while building the content
-     */
-    public static void toXContent(AllocationCommands commands, XContentBuilder builder, ToXContent.Params params) throws IOException {
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startArray("commands");
-        for (AllocationCommand command : commands.commands) {
+        for (AllocationCommand command : commands) {
             builder.startObject();
             builder.field(command.name(), command);
             builder.endObject();
         }
         builder.endArray();
+        return builder;
     }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (obj == null || getClass() != obj.getClass()) {
+            return false;
+        }
+        AllocationCommands other = (AllocationCommands) obj;
+        // Override equals and hashCode for testing
+        return Objects.equals(commands, other.commands);
+    }
+
+    @Override
+    public int hashCode() {
+        // Override equals and hashCode for testing
+        return Objects.hashCode(commands);
+    }
 }
@@ -83,4 +83,18 @@ public abstract class BasePrimaryAllocationCommand extends AbstractAllocateAlloc
     protected void extraXContent(XContentBuilder builder) throws IOException {
         builder.field(ACCEPT_DATA_LOSS_FIELD, acceptDataLoss);
     }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (false == super.equals(obj)) {
+            return false;
+        }
+        BasePrimaryAllocationCommand other = (BasePrimaryAllocationCommand) obj;
+        return acceptDataLoss == other.acceptDataLoss;
+    }
+
+    @Override
+    public int hashCode() {
+        return 31 * super.hashCode() + Boolean.hashCode(acceptDataLoss);
+    }
 }
@@ -35,6 +35,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;

 import java.io.IOException;
+import java.util.Objects;

 import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING;

@@ -240,4 +241,23 @@ public class CancelAllocationCommand implements AllocationCommand {
         }
         return new CancelAllocationCommand(index, shardId, nodeId, allowPrimary);
     }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (obj == null || getClass() != obj.getClass()) {
+            return false;
+        }
+        CancelAllocationCommand other = (CancelAllocationCommand) obj;
+        // Override equals and hashCode for testing
+        return Objects.equals(index, other.index) &&
+                Objects.equals(shardId, other.shardId) &&
+                Objects.equals(node, other.node) &&
+                Objects.equals(allowPrimary, other.allowPrimary);
+    }
+
+    @Override
+    public int hashCode() {
+        // Override equals and hashCode for testing
+        return Objects.hash(index, shardId, node, allowPrimary);
+    }
 }
@@ -34,6 +34,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;

 import java.io.IOException;
+import java.util.Objects;

 /**
  * A command that moves a shard from a specific node to another node.<br>

@@ -195,4 +196,23 @@ public class MoveAllocationCommand implements AllocationCommand {
         }
         return new MoveAllocationCommand(index, shardId, fromNode, toNode);
     }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (obj == null || getClass() != obj.getClass()) {
+            return false;
+        }
+        MoveAllocationCommand other = (MoveAllocationCommand) obj;
+        // Override equals and hashCode for testing
+        return Objects.equals(index, other.index) &&
+                Objects.equals(shardId, other.shardId) &&
+                Objects.equals(fromNode, other.fromNode) &&
+                Objects.equals(toNode, other.toNode);
+    }
+
+    @Override
+    public int hashCode() {
+        // Override equals and hashCode for testing
+        return Objects.hash(index, shardId, fromNode, toNode);
+    }
 }
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.decider;
+
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.UnassignedInfo;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * An allocation decider that prevents shards from being allocated on any node if the shards allocation has been retried N times without
+ * success. This means if a shard has been INITIALIZING N times in a row without being moved to STARTED the shard will be ignored until
+ * the setting for <tt>index.allocation.max_retry</tt> is raised. The default value is <tt>5</tt>.
+ * Note: This allocation decider also allows allocation of repeatedly failing shards when the <tt>/_cluster/reroute?retry_failed=true</tt>
+ * API is manually invoked. This allows single retries without raising the limits.
+ *
+ * @see RoutingAllocation#isRetryFailed()
+ */
+public class MaxRetryAllocationDecider extends AllocationDecider {
+
+    public static final Setting<Integer> SETTING_ALLOCATION_MAX_RETRY = Setting.intSetting("index.allocation.max_retries", 5, 0,
+        Setting.Property.Dynamic, Setting.Property.IndexScope);
+
+    public static final String NAME = "max_retry";
+
+    /**
+     * Initializes a new {@link MaxRetryAllocationDecider}
+     *
+     * @param settings {@link Settings} used by this {@link AllocationDecider}
+     */
+    @Inject
+    public MaxRetryAllocationDecider(Settings settings) {
+        super(settings);
+    }
+
+    @Override
+    public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocation) {
+        UnassignedInfo unassignedInfo = shardRouting.unassignedInfo();
+        if (unassignedInfo != null && unassignedInfo.getNumFailedAllocations() > 0) {
+            final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index());
+            final int maxRetry = SETTING_ALLOCATION_MAX_RETRY.get(indexMetaData.getSettings());
+            if (allocation.isRetryFailed()) { // manual allocation - retry
+                // if we are called via the _reroute API we ignore the failure counter and try to allocate
+                // this improves the usability since people don't need to raise the limits to issue retries since a simple _reroute call is
+                // enough to manually retry.
+                return allocation.decision(Decision.YES, NAME, "shard has already failed allocating ["
+                    + unassignedInfo.getNumFailedAllocations() + "] times vs. [" + maxRetry + "] retries allowed "
+                    + unassignedInfo.toString() + " - retrying once on manual allocation");
+            } else if (unassignedInfo.getNumFailedAllocations() >= maxRetry) {
+                return allocation.decision(Decision.NO, NAME, "shard has already failed allocating ["
+                    + unassignedInfo.getNumFailedAllocations() + "] times vs. [" + maxRetry + "] retries allowed "
+                    + unassignedInfo.toString() + " - manually call [/_cluster/reroute?retry_failed=true] to retry");
+            }
+        }
+        return allocation.decision(Decision.YES, NAME, "shard has no previous failures");
+    }
+
+    @Override
+    public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+        return canAllocate(shardRouting, allocation);
+    }
+}
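The decider above is driven by two knobs: the dynamic, index-scoped `index.allocation.max_retries` setting and the `retry_failed` flag on the reroute API. A minimal, self-contained sketch of the decision rule it implements (plain Java with illustrative names, not the actual Elasticsearch types):

----
// Sketch of the MaxRetryAllocationDecider rule; Decision and the method
// shape are simplified stand-ins for the real classes in the diff above.
public class MaxRetryRuleSketch {
    static final int DEFAULT_MAX_RETRIES = 5; // default of index.allocation.max_retries

    enum Decision { YES, NO }

    static Decision canAllocate(int numFailedAllocations, int maxRetries, boolean manualRetry) {
        if (numFailedAllocations == 0) {
            return Decision.YES; // shard has no previous failures
        }
        if (manualRetry) {
            // /_cluster/reroute?retry_failed=true ignores the counter for one attempt
            return Decision.YES;
        }
        return numFailedAllocations >= maxRetries ? Decision.NO : Decision.YES;
    }

    public static void main(String[] args) {
        System.out.println(canAllocate(5, DEFAULT_MAX_RETRIES, false)); // NO: limit reached
        System.out.println(canAllocate(5, DEFAULT_MAX_RETRIES, true));  // YES: manual retry
    }
}
----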
File diff suppressed because it is too large
@@ -21,6 +21,7 @@ package org.elasticsearch.common;

 import java.io.IOException;
+import java.util.Base64;
 import java.util.Random;

 class RandomBasedUUIDGenerator implements UUIDGenerator {

@@ -54,14 +55,6 @@ class RandomBasedUUIDGenerator implements UUIDGenerator {
          * We set only the MSB of the variant*/
         randomBytes[8] &= 0x3f; /* clear the 2 most significant bits */
         randomBytes[8] |= 0x80; /* set the variant (MSB is set)*/
-        try {
-            byte[] encoded = Base64.encodeBytesToBytes(randomBytes, 0, randomBytes.length, Base64.URL_SAFE);
-            // we know the bytes are 16, and not a multi of 3, so remove the 2 padding chars that are added
-            assert encoded[encoded.length - 1] == '=';
-            assert encoded[encoded.length - 2] == '=';
-            return new String(encoded, 0, encoded.length - 2, Base64.PREFERRED_ENCODING);
-        } catch (IOException e) {
-            throw new IllegalStateException("should not be thrown");
-        }
+        return Base64.getUrlEncoder().withoutPadding().encodeToString(randomBytes);
     }
 }
@@ -19,8 +19,7 @@

 package org.elasticsearch.common;

-import java.io.IOException;
+import java.util.Base64;
 import java.util.concurrent.atomic.AtomicInteger;

 /** These are essentially flake ids (http://boundary.com/blog/2012/01/12/flake-a-decentralized-k-ordered-unique-id-generator-in-erlang) but

@@ -80,15 +79,6 @@ class TimeBasedUUIDGenerator implements UUIDGenerator {

         assert 9 + SECURE_MUNGED_ADDRESS.length == uuidBytes.length;

-        byte[] encoded;
-        try {
-            encoded = Base64.encodeBytesToBytes(uuidBytes, 0, uuidBytes.length, Base64.URL_SAFE);
-        } catch (IOException e) {
-            throw new IllegalStateException("should not be thrown", e);
-        }
-
-        // We are a multiple of 3 bytes so we should not see any padding:
-        assert encoded[encoded.length - 1] != '=';
-        return new String(encoded, 0, encoded.length, Base64.PREFERRED_ENCODING);
+        return Base64.getUrlEncoder().withoutPadding().encodeToString(uuidBytes);
     }
 }
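Both UUID generators now delegate to the JDK's `java.util.Base64` URL-safe encoder without padding, which yields the same output the removed hand-rolled logic produced. A small stand-alone demonstration:

----
import java.util.Base64;

// Demonstrates the JDK encoder the UUID generators switch to: URL-safe
// alphabet ('-' and '_' instead of '+' and '/') and no '=' padding, which
// is what the removed assert/substring logic used to achieve by hand.
public class UrlSafeBase64Demo {
    public static void main(String[] args) {
        byte[] raw = new byte[] {(byte) 0xfb, (byte) 0xff, 0x01, 0x02}; // not a multiple of 3 bytes
        System.out.println(Base64.getEncoder().encodeToString(raw));                  // "+/8BAg==" padded
        System.out.println(Base64.getUrlEncoder().withoutPadding().encodeToString(raw)); // "-_8BAg" unpadded
    }
}
----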
@@ -30,6 +30,8 @@ import java.util.List;
  */
 public class BlobPath implements Iterable<String> {

+    private static final String SEPARATOR = "/";
+
     private final List<String> paths;

     public BlobPath() {

@@ -60,15 +62,12 @@ public class BlobPath implements Iterable<String> {
         return new BlobPath(Collections.unmodifiableList(paths));
     }

-    public String buildAsString(String separator) {
-        StringBuilder sb = new StringBuilder();
-        for (int i = 0; i < paths.size(); i++) {
-            sb.append(paths.get(i));
-            if (i < (paths.size() - 1)) {
-                sb.append(separator);
-            }
+    public String buildAsString() {
+        String p = String.join(SEPARATOR, paths);
+        if (p.isEmpty()) {
+            return p;
         }
-        return sb.toString();
+        return p + SEPARATOR;
     }

     @Override
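The rewritten `buildAsString()` fixes the separator to `/` and, unlike the old variant, appends a trailing separator to non-empty paths. A stand-alone sketch of the new contract:

----
import java.util.Arrays;
import java.util.List;

// Stand-alone sketch of the buildAsString() behavior shown above: segments
// joined with "/" and a trailing "/" appended unless the path is empty.
public class BlobPathSketch {
    static String buildAsString(List<String> paths) {
        String p = String.join("/", paths);
        return p.isEmpty() ? p : p + "/";
    }

    public static void main(String[] args) {
        System.out.println(buildAsString(Arrays.asList("indices", "1", "0"))); // "indices/1/0/"
        System.out.println(buildAsString(Arrays.<String>asList()));            // "" (empty root path)
    }
}
----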
@@ -321,20 +321,15 @@ public class Joda {

     public static class EpochTimeParser implements DateTimeParser {

-        private static final Pattern MILLI_SECOND_PRECISION_PATTERN = Pattern.compile("^-?\\d{1,13}$");
-        private static final Pattern SECOND_PRECISION_PATTERN = Pattern.compile("^-?\\d{1,10}$");
-
         private final boolean hasMilliSecondPrecision;
-        private final Pattern pattern;

         public EpochTimeParser(boolean hasMilliSecondPrecision) {
             this.hasMilliSecondPrecision = hasMilliSecondPrecision;
-            this.pattern = hasMilliSecondPrecision ? MILLI_SECOND_PRECISION_PATTERN : SECOND_PRECISION_PATTERN;
         }

         @Override
         public int estimateParsedLength() {
-            return hasMilliSecondPrecision ? 13 : 10;
+            return hasMilliSecondPrecision ? 19 : 16;
         }

         @Override

@@ -344,8 +339,7 @@ public class Joda {

             if ((isPositive && isTooLong) ||
                 // timestamps have to have UTC timezone
-                bucket.getZone() != DateTimeZone.UTC ||
-                pattern.matcher(text).matches() == false) {
+                bucket.getZone() != DateTimeZone.UTC) {
                 return -1;
             }

@@ -378,7 +372,7 @@ public class Joda {

         @Override
         public int estimatePrintedLength() {
-            return hasMilliSecondPrecision ? 13 : 10;
+            return hasMilliSecondPrecision ? 19 : 16;
         }

         @Override
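The new length estimates appear to budget for the full range of a Java long rather than for "reasonable" epoch values. A quick check of the digit counts:

----
// 19 is the width of the largest epoch-millisecond value a long can hold,
// 16 the width of the corresponding epoch-second value (sign excluded).
public class EpochLengthCheck {
    public static void main(String[] args) {
        System.out.println(Long.toString(Long.MAX_VALUE).length());        // 19 (millis estimate)
        System.out.println(Long.toString(Long.MAX_VALUE / 1000).length()); // 16 (seconds estimate)
    }
}
----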
@@ -19,9 +19,10 @@

 package org.elasticsearch.common.logging;

+import org.apache.log4j.Java9Hack;
 import org.apache.log4j.PropertyConfigurator;
+import org.apache.lucene.util.Constants;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.bootstrap.BootstrapInfo;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.settings.SettingsException;
 import org.elasticsearch.env.Environment;

@@ -87,14 +88,17 @@ public class LogConfigurator {
         replacements.put("ttcc", "org.apache.log4j.TTCCLayout");
         replacements.put("xml", "org.apache.log4j.XMLLayout");
         REPLACEMENTS = unmodifiableMap(replacements);
+
+        if (Constants.JRE_IS_MINIMUM_JAVA9) {
+            Java9Hack.fixLog4j();
+        }
     }

     private static boolean loaded;

     /**
      * Consolidates settings and converts them into actual log4j settings, then initializes loggers and appenders.
      *
-     * @param settings custom settings that should be applied
+     * @param settings      custom settings that should be applied
      * @param resolveConfig controls whether the logging conf file should be read too or not.
      */
     public static void configure(Settings settings, boolean resolveConfig) {

@@ -109,7 +113,7 @@ public class LogConfigurator {
         if (resolveConfig) {
             resolveConfig(environment, settingsBuilder);
         }
         settingsBuilder.putProperties("es.", BootstrapInfo.getSystemProperties());
         // add custom settings after config was added so that they are not overwritten by config
         settingsBuilder.put(settings);
         settingsBuilder.replacePropertyPlaceholders();
@@ -19,11 +19,6 @@

 package org.elasticsearch.common.network;

-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
 import org.elasticsearch.action.support.replication.ReplicationTask;
 import org.elasticsearch.client.transport.TransportClientNodesService;
 import org.elasticsearch.client.transport.support.TransportProxyClient;

@@ -36,7 +31,6 @@ import org.elasticsearch.cluster.routing.allocation.command.AllocationCommandReg
 import org.elasticsearch.cluster.routing.allocation.command.CancelAllocationCommand;
 import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
 import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.inject.AbstractModule;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.io.stream.Writeable;

@@ -71,6 +65,12 @@ import org.elasticsearch.rest.action.admin.cluster.snapshots.restore.RestRestore
 import org.elasticsearch.rest.action.admin.cluster.snapshots.status.RestSnapshotsStatusAction;
 import org.elasticsearch.rest.action.admin.cluster.state.RestClusterStateAction;
 import org.elasticsearch.rest.action.admin.cluster.stats.RestClusterStatsAction;
+import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestDeleteSearchTemplateAction;
+import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestDeleteStoredScriptAction;
+import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestGetSearchTemplateAction;
+import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestGetStoredScriptAction;
+import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestPutSearchTemplateAction;
+import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestPutStoredScriptAction;
 import org.elasticsearch.rest.action.admin.cluster.tasks.RestPendingClusterTasksAction;
 import org.elasticsearch.rest.action.admin.indices.alias.RestIndicesAliasesAction;
 import org.elasticsearch.rest.action.admin.indices.alias.delete.RestIndexDeleteAliasesAction;

@@ -137,19 +137,11 @@ import org.elasticsearch.rest.action.ingest.RestGetPipelineAction;
 import org.elasticsearch.rest.action.ingest.RestPutPipelineAction;
 import org.elasticsearch.rest.action.ingest.RestSimulatePipelineAction;
 import org.elasticsearch.rest.action.main.RestMainAction;
-import org.elasticsearch.rest.action.percolate.RestMultiPercolateAction;
-import org.elasticsearch.rest.action.percolate.RestPercolateAction;
-import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestDeleteStoredScriptAction;
-import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestGetStoredScriptAction;
-import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestPutStoredScriptAction;
 import org.elasticsearch.rest.action.search.RestClearScrollAction;
 import org.elasticsearch.rest.action.search.RestMultiSearchAction;
 import org.elasticsearch.rest.action.search.RestSearchAction;
 import org.elasticsearch.rest.action.search.RestSearchScrollAction;
 import org.elasticsearch.rest.action.suggest.RestSuggestAction;
-import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestDeleteSearchTemplateAction;
-import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestGetSearchTemplateAction;
-import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestPutSearchTemplateAction;
 import org.elasticsearch.rest.action.termvectors.RestMultiTermVectorsAction;
 import org.elasticsearch.rest.action.termvectors.RestTermVectorsAction;
 import org.elasticsearch.rest.action.update.RestUpdateAction;

@@ -159,6 +151,9 @@ import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.transport.local.LocalTransport;
 import org.elasticsearch.transport.netty.NettyTransport;

+import java.util.Arrays;
+import java.util.List;
+
 /**
  * A module to handle registering and binding all network related classes.
  */

@@ -250,8 +245,6 @@ public class NetworkModule extends AbstractModule {
         RestMultiTermVectorsAction.class,
         RestBulkAction.class,
         RestUpdateAction.class,
-        RestPercolateAction.class,
-        RestMultiPercolateAction.class,

         RestSearchAction.class,
         RestSearchScrollAction.class,

@@ -404,7 +397,7 @@ public class NetworkModule extends AbstractModule {
      * @param commandName the names under which the command should be parsed. The {@link ParseField#getPreferredName()} is special because
      *        it is the name under which the command's reader is registered.
      */
-    public <T extends AllocationCommand> void registerAllocationCommand(Writeable.Reader<T> reader, AllocationCommand.Parser<T> parser,
+    private <T extends AllocationCommand> void registerAllocationCommand(Writeable.Reader<T> reader, AllocationCommand.Parser<T> parser,
             ParseField commandName) {
         allocationCommandRegistry.register(parser, commandName);
         namedWriteableRegistry.register(AllocationCommand.class, commandName.getPreferredName(), reader);
@@ -375,7 +375,6 @@ public final class ClusterSettings extends AbstractScopedSettings {
         BaseRestHandler.MULTI_ALLOW_EXPLICIT_INDEX,
         ClusterName.CLUSTER_NAME_SETTING,
         Client.CLIENT_TYPE_SETTING_S,
-        InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING,
         ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING,
         EsExecutors.PROCESSORS_SETTING,
         ThreadContext.DEFAULT_HEADERS_SETTING,
@@ -21,6 +21,7 @@ package org.elasticsearch.common.settings;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.routing.UnassignedInfo;
 import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;
 import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.gateway.PrimaryShardAllocator;

@@ -35,12 +36,10 @@ import org.elasticsearch.index.engine.EngineConfig;
 import org.elasticsearch.index.fielddata.IndexFieldDataService;
 import org.elasticsearch.index.mapper.FieldMapper;
 import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.percolator.PercolatorQueryCache;
 import org.elasticsearch.index.similarity.SimilarityService;
 import org.elasticsearch.index.store.FsDirectoryService;
 import org.elasticsearch.index.store.IndexStore;
 import org.elasticsearch.index.store.Store;
-import org.elasticsearch.index.IndexWarmer;
 import org.elasticsearch.indices.IndicesRequestCache;

@@ -59,6 +58,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
     public static final Predicate<String> INDEX_SETTINGS_KEY_PREDICATE = (s) -> s.startsWith(IndexMetaData.INDEX_SETTING_PREFIX);

     public static final Set<Setting<?>> BUILT_IN_INDEX_SETTINGS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(
+        MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY,
         IndexSettings.INDEX_TTL_DISABLE_PURGE_SETTING,
         IndexStore.INDEX_STORE_THROTTLE_TYPE_SETTING,
         IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING,

@@ -126,7 +126,6 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
         FieldMapper.IGNORE_MALFORMED_SETTING,
         FieldMapper.COERCE_SETTING,
         Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING,
-        PercolatorQueryCache.INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING,
         MapperService.INDEX_MAPPER_DYNAMIC_SETTING,
         MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING,
         MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING,

@@ -537,6 +537,10 @@ public class Setting<T> extends ToXContentToBytes {
         return new Setting<>(key, fallbackSetting, Booleans::parseBooleanExact, properties);
     }

+    public static Setting<Boolean> boolSetting(String key, Function<Settings, String> defaultValueFn, Property... properties) {
+        return new Setting<>(key, defaultValueFn, Booleans::parseBooleanExact, properties);
+    }
+
     public static Setting<ByteSizeValue> byteSizeSetting(String key, String percentage, Property... properties) {
         return new Setting<>(key, (s) -> percentage, (s) -> MemorySizeValue.parseBytesSizeValueOrHeapRatio(s, key), properties);
     }
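The new `boolSetting` overload lets a boolean setting compute its default from the `Settings` instance it is read against. A usage sketch against the classes in this diff (the setting keys here are made up for illustration):

----
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;

public class DerivedDefaultExample {
    // "demo.feature.enabled" defaults to whatever "demo.parent.enabled"
    // evaluates to, computed lazily from the Settings it is read from.
    static final Setting<Boolean> FEATURE_ENABLED = Setting.boolSetting(
            "demo.feature.enabled",
            settings -> settings.get("demo.parent.enabled", "false"),
            Property.NodeScope);

    public static void main(String[] args) {
        Settings settings = Settings.builder().put("demo.parent.enabled", true).build();
        System.out.println(FEATURE_ENABLED.get(settings)); // true (inherited default)
    }
}
----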
@@ -58,9 +58,11 @@ import java.util.Set;
 import java.util.SortedMap;
 import java.util.TreeMap;
 import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
+import java.util.function.Predicate;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;

 import static org.elasticsearch.common.unit.ByteSizeValue.parseBytesSizeValue;
 import static org.elasticsearch.common.unit.SizeValue.parseSizeValue;

@@ -942,89 +944,54 @@ public final class Settings implements ToXContent {
         return this;
     }

-    /**
-     * Puts all the properties with keys starting with the provided <tt>prefix</tt>.
-     *
-     * @param prefix The prefix to filter property key by
-     * @param properties The properties to put
-     * @return The builder
-     */
-    public Builder putProperties(String prefix, Dictionary<Object, Object> properties) {
-        for (Object property : Collections.list(properties.keys())) {
-            String key = Objects.toString(property);
-            String value = Objects.toString(properties.get(property));
-            if (key.startsWith(prefix)) {
-                map.put(key.substring(prefix.length()), value);
+    public Builder putProperties(Map<String, String> esSettings, Predicate<String> keyPredicate, Function<String, String> keyFunction) {
+        for (final Map.Entry<String, String> esSetting : esSettings.entrySet()) {
+            final String key = esSetting.getKey();
+            if (keyPredicate.test(key)) {
+                map.put(keyFunction.apply(key), esSetting.getValue());
             }
         }
         return this;
     }

-    /**
-     * Puts all the properties with keys starting with the provided <tt>prefix</tt>.
-     *
-     * @param prefix The prefix to filter property key by
-     * @param properties The properties to put
-     * @return The builder
-     */
-    public Builder putProperties(String prefix, Dictionary<Object, Object> properties, String ignorePrefix) {
-        for (Object property : Collections.list(properties.keys())) {
-            String key = Objects.toString(property);
-            String value = Objects.toString(properties.get(property));
-            if (key.startsWith(prefix)) {
-                if (!key.startsWith(ignorePrefix)) {
-                    map.put(key.substring(prefix.length()), value);
-                }
-            }
-        }
-        return this;
-    }
-
     /**
-     * Runs across all the settings set on this builder and replaces <tt>${...}</tt> elements in the
-     * each setting value according to the following logic:
-     * <p>
-     * First, tries to resolve it against a System property ({@link System#getProperty(String)}), next,
-     * tries and resolve it against an environment variable ({@link System#getenv(String)}), and last, tries
-     * and replace it with another setting already set on this builder.
+     * Runs across all the settings set on this builder and
+     * replaces <tt>${...}</tt> elements in each setting with
+     * another setting already set on this builder.
      */
     public Builder replacePropertyPlaceholders() {
+        return replacePropertyPlaceholders(System::getenv);
+    }
+
+    // visible for testing
+    Builder replacePropertyPlaceholders(Function<String, String> getenv) {
         PropertyPlaceholder propertyPlaceholder = new PropertyPlaceholder("${", "}", false);
         PropertyPlaceholder.PlaceholderResolver placeholderResolver = new PropertyPlaceholder.PlaceholderResolver() {
-            @Override
-            public String resolvePlaceholder(String placeholderName) {
-                if (placeholderName.startsWith("env.")) {
-                    // explicit env var prefix
-                    return System.getenv(placeholderName.substring("env.".length()));
-                }
-                String value = System.getProperty(placeholderName);
-                if (value != null) {
-                    return value;
-                }
-                value = System.getenv(placeholderName);
-                if (value != null) {
-                    return value;
-                }
-                return map.get(placeholderName);
-            }
+            @Override
+            public String resolvePlaceholder(String placeholderName) {
+                final String value = getenv.apply(placeholderName);
+                if (value != null) {
+                    return value;
+                }
+                return map.get(placeholderName);
+            }

-            @Override
-            public boolean shouldIgnoreMissing(String placeholderName) {
-                // if its an explicit env var, we are ok with not having a value for it and treat it as optional
-                if (placeholderName.startsWith("env.") || placeholderName.startsWith("prompt.")) {
-                    return true;
-                }
-                return false;
-            }
+            @Override
+            public boolean shouldIgnoreMissing(String placeholderName) {
+                if (placeholderName.startsWith("prompt.")) {
+                    return true;
+                }
+                return false;
+            }

-            @Override
-            public boolean shouldRemoveMissingPlaceholder(String placeholderName) {
-                if (placeholderName.startsWith("prompt.")) {
-                    return false;
-                }
-                return true;
-            }
-        };
+            @Override
+            public boolean shouldRemoveMissingPlaceholder(String placeholderName) {
+                if (placeholderName.startsWith("prompt.")) {
+                    return false;
+                }
+                return true;
+            }
+        };
         for (Map.Entry<String, String> entry : new HashMap<>(map).entrySet()) {
             String value = propertyPlaceholder.replacePlaceholders(entry.getKey(), entry.getValue(), placeholderResolver);
             // if the values exists and has length, we should maintain it in the map
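After this change `${...}` placeholders resolve only against the supplied environment lookup (injectable for tests) and keys already set on the builder; JVM system properties are no longer consulted. A self-contained sketch of the narrowed lookup order (illustrative names, not the actual classes):

----
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

public class PlaceholderSketch {
    // Mirrors the new resolvePlaceholder(): env lookup first, then keys
    // already on the builder; returns null when nothing matches.
    static String resolve(String placeholder, Function<String, String> getenv, Map<String, String> builderMap) {
        String value = getenv.apply(placeholder);
        if (value != null) {
            return value;
        }
        return builderMap.get(placeholder);
    }

    public static void main(String[] args) {
        Map<String, String> map = new HashMap<>();
        map.put("node.name", "node-1");
        Function<String, String> fakeEnv = name -> "HOSTNAME".equals(name) ? "box42" : null;
        System.out.println(resolve("HOSTNAME", fakeEnv, map));  // box42 (env lookup wins)
        System.out.println(resolve("node.name", fakeEnv, map)); // node-1 (builder fallback)
    }
}
----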
@@ -62,6 +62,24 @@ public class GatewayAllocator extends AbstractComponent {
         this.replicaShardAllocator = new InternalReplicaShardAllocator(settings, storeAction);
     }

+    /**
+     * Returns true if the given shard has an async fetch pending
+     */
+    public boolean hasFetchPending(ShardId shardId, boolean primary) {
+        if (primary) {
+            AsyncShardFetch<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> fetch = asyncFetchStarted.get(shardId);
+            if (fetch != null) {
+                return fetch.getNumberOfInFlightFetches() > 0;
+            }
+        } else {
+            AsyncShardFetch<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> fetch = asyncFetchStore.get(shardId);
+            if (fetch != null) {
+                return fetch.getNumberOfInFlightFetches() > 0;
+            }
+        }
+        return false;
+    }
+
     public void setReallocation(final ClusterService clusterService, final RoutingService routingService) {
         this.routingService = routingService;
         clusterService.add(new ClusterStateListener() {
@@ -108,7 +108,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
                     currentNode, nodeWithHighestMatch);
                 it.moveToUnassigned(new UnassignedInfo(UnassignedInfo.Reason.REALLOCATED_REPLICA,
                     "existing allocation of replica to [" + currentNode + "] cancelled, sync id match found on node [" + nodeWithHighestMatch + "]",
-                    null, allocation.getCurrentNanoTime(), System.currentTimeMillis()));
+                    null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis()));
                 changed = true;
             }
         }
@@ -50,7 +50,6 @@ import org.elasticsearch.index.engine.EngineFactory;
 import org.elasticsearch.index.fielddata.IndexFieldDataCache;
 import org.elasticsearch.index.fielddata.IndexFieldDataService;
 import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.percolator.PercolatorQueryCache;
 import org.elasticsearch.index.query.ParsedQuery;
 import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.index.shard.IndexEventListener;

@@ -151,11 +150,9 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
         this.indexStore = indexStore;
         indexFieldData.setListener(new FieldDataCacheListener(this));
         this.bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetCacheListener(this));
-        PercolatorQueryCache percolatorQueryCache = new PercolatorQueryCache(indexSettings, IndexService.this::newQueryShardContext);
         this.warmer = new IndexWarmer(indexSettings.getSettings(), threadPool,
-            bitsetFilterCache.createListener(threadPool),
-            percolatorQueryCache.createListener(threadPool));
-        this.indexCache = new IndexCache(indexSettings, queryCache, bitsetFilterCache, percolatorQueryCache);
+            bitsetFilterCache.createListener(threadPool));
+        this.indexCache = new IndexCache(indexSettings, queryCache, bitsetFilterCache);
         this.engineFactory = engineFactory;
         // initialize this last -- otherwise if the wrapper requires any other member to be non-null we fail with an NPE
         this.searcherWrapper = wrapperFactory.newWrapper(this);

@@ -239,8 +236,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
                 }
             }
         } finally {
-            IOUtils.close(bitsetFilterCache, indexCache, indexFieldData, analysisService, refreshTask, fsyncTask,
-                cache().getPercolatorQueryCache());
+            IOUtils.close(bitsetFilterCache, indexCache, indexFieldData, analysisService, refreshTask, fsyncTask);
         }
     }
 }

@@ -443,7 +439,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
         return new QueryShardContext(
             indexSettings, indexCache.bitsetFilterCache(), indexFieldData, mapperService(),
             similarityService(), nodeServicesProvider.getScriptService(), nodeServicesProvider.getIndicesQueriesRegistry(),
-            nodeServicesProvider.getClient(), indexCache.getPercolatorQueryCache(), indexReader,
+            nodeServicesProvider.getClient(), indexReader,
             nodeServicesProvider.getClusterService().state()
         );
     }
@@ -88,6 +88,7 @@ public interface CharMatcher {
                 case Character.CURRENCY_SYMBOL:
                 case Character.MATH_SYMBOL:
                 case Character.OTHER_SYMBOL:
+                case Character.MODIFIER_SYMBOL:
                     return true;
                 default:
                     return false;
@@ -33,13 +33,11 @@ import org.apache.lucene.analysis.util.CharArraySet;
 public final class FingerprintAnalyzer extends Analyzer {
     private final char separator;
     private final int maxOutputSize;
-    private final boolean preserveOriginal;
     private final CharArraySet stopWords;

-    public FingerprintAnalyzer(CharArraySet stopWords, char separator, int maxOutputSize, boolean preserveOriginal) {
+    public FingerprintAnalyzer(CharArraySet stopWords, char separator, int maxOutputSize) {
         this.separator = separator;
         this.maxOutputSize = maxOutputSize;
-        this.preserveOriginal = preserveOriginal;
         this.stopWords = stopWords;
     }

@@ -48,7 +46,7 @@ public final class FingerprintAnalyzer extends Analyzer {
         final Tokenizer tokenizer = new StandardTokenizer();
         TokenStream stream = tokenizer;
         stream = new LowerCaseFilter(stream);
-        stream = new ASCIIFoldingFilter(stream, preserveOriginal);
+        stream = new ASCIIFoldingFilter(stream, false);
         stream = new StopFilter(stream, stopWords);
         stream = new FingerprintFilter(stream, maxOutputSize, separator);
         return new TokenStreamComponents(tokenizer, stream);
@@ -34,10 +34,8 @@ import org.elasticsearch.index.IndexSettings;
 public class FingerprintAnalyzerProvider extends AbstractIndexAnalyzerProvider<Analyzer> {

     public static ParseField MAX_OUTPUT_SIZE = FingerprintTokenFilterFactory.MAX_OUTPUT_SIZE;
-    public static ParseField PRESERVE_ORIGINAL = ASCIIFoldingTokenFilterFactory.PRESERVE_ORIGINAL;

     public static int DEFAULT_MAX_OUTPUT_SIZE = FingerprintTokenFilterFactory.DEFAULT_MAX_OUTPUT_SIZE;
-    public static boolean DEFAULT_PRESERVE_ORIGINAL = ASCIIFoldingTokenFilterFactory.DEFAULT_PRESERVE_ORIGINAL;
     public static CharArraySet DEFAULT_STOP_WORDS = CharArraySet.EMPTY_SET;

     private final FingerprintAnalyzer analyzer;

@@ -47,10 +45,9 @@ public class FingerprintAnalyzerProvider extends AbstractIndexAnalyzerProvider<A

         char separator = FingerprintTokenFilterFactory.parseSeparator(settings);
         int maxOutputSize = settings.getAsInt(MAX_OUTPUT_SIZE.getPreferredName(),DEFAULT_MAX_OUTPUT_SIZE);
-        boolean preserveOriginal = settings.getAsBoolean(PRESERVE_ORIGINAL.getPreferredName(), DEFAULT_PRESERVE_ORIGINAL);
         CharArraySet stopWords = Analysis.parseStopWords(env, settings, DEFAULT_STOP_WORDS);

-        this.analyzer = new FingerprintAnalyzer(stopWords, separator, maxOutputSize, preserveOriginal);
+        this.analyzer = new FingerprintAnalyzer(stopWords, separator, maxOutputSize);
     }

     @Override
@@ -24,7 +24,6 @@ import org.elasticsearch.index.AbstractIndexComponent;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
 import org.elasticsearch.index.cache.query.QueryCache;
-import org.elasticsearch.index.percolator.PercolatorQueryCache;

 import java.io.Closeable;
 import java.io.IOException;

@@ -36,14 +35,11 @@ public class IndexCache extends AbstractIndexComponent implements Closeable {

     private final QueryCache queryCache;
     private final BitsetFilterCache bitsetFilterCache;
-    private final PercolatorQueryCache percolatorQueryCache;

-    public IndexCache(IndexSettings indexSettings, QueryCache queryCache, BitsetFilterCache bitsetFilterCache,
-                      PercolatorQueryCache percolatorQueryCache) {
+    public IndexCache(IndexSettings indexSettings, QueryCache queryCache, BitsetFilterCache bitsetFilterCache) {
         super(indexSettings);
         this.queryCache = queryCache;
         this.bitsetFilterCache = bitsetFilterCache;
-        this.percolatorQueryCache = percolatorQueryCache;
     }

     public QueryCache query() {

@@ -57,13 +53,9 @@ public class IndexCache extends AbstractIndexComponent implements Closeable {
         return bitsetFilterCache;
     }

-    public PercolatorQueryCache getPercolatorQueryCache() {
-        return percolatorQueryCache;
-    }
-
     @Override
     public void close() throws IOException {
-        IOUtils.close(queryCache, bitsetFilterCache, percolatorQueryCache);
+        IOUtils.close(queryCache, bitsetFilterCache);
     }

     public void clear(String reason) {
@@ -19,7 +19,6 @@
 package org.elasticsearch.index.engine;

 import org.apache.lucene.index.SegmentInfos;
-import org.elasticsearch.common.Base64;
 import org.elasticsearch.common.collect.MapBuilder;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;

@@ -29,6 +28,7 @@ import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;

 import java.io.IOException;
+import java.util.Base64;
 import java.util.Map;

 /** a class the returns dynamic information with respect to the last commit point of this shard */

@@ -44,9 +44,7 @@ public final class CommitStats implements Streamable, ToXContent {
         userData = MapBuilder.<String, String>newMapBuilder().putAll(segmentInfos.getUserData()).immutableMap();
         // lucene calls the current generation, last generation.
         generation = segmentInfos.getLastGeneration();
-        if (segmentInfos.getId() != null) { // id is only written starting with Lucene 5.0
-            id = Base64.encodeBytes(segmentInfos.getId());
-        }
+        id = Base64.getEncoder().encodeToString(segmentInfos.getId());
         numDocs = Lucene.getNumDocs(segmentInfos);
     }
@@ -28,22 +28,18 @@ import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.SegmentCommitInfo;
 import org.apache.lucene.index.SegmentInfo;
 import org.apache.lucene.index.SegmentInfos;
 import org.apache.lucene.index.SegmentReader;
 import org.apache.lucene.index.SnapshotDeletionPolicy;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.SearcherManager;
 import org.apache.lucene.search.join.BitSetProducer;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.util.Accountable;
 import org.apache.lucene.util.Accountables;
 import org.elasticsearch.ExceptionsHelper;
-import org.elasticsearch.common.Base64;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.collect.ImmutableOpenMap;

@@ -65,7 +61,6 @@ import org.elasticsearch.index.mapper.ParsedDocument;
 import org.elasticsearch.index.mapper.Uid;
 import org.elasticsearch.index.merge.MergeStats;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.shard.TranslogRecoveryPerformer;
 import org.elasticsearch.index.store.Store;
 import org.elasticsearch.index.translog.Translog;

@@ -74,7 +69,7 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.nio.file.NoSuchFileException;
 import java.util.Arrays;
-import java.util.Collection;
+import java.util.Base64;
 import java.util.Comparator;
 import java.util.HashMap;
 import java.util.List;

@@ -1092,7 +1087,7 @@ public abstract class Engine implements Closeable {

         @Override
         public String toString() {
-            return Base64.encodeBytes(id);
+            return Base64.getEncoder().encodeToString(id);
         }

         public boolean idsEqual(byte[] id) {
@@ -23,7 +23,6 @@ import com.carrotsearch.hppc.ObjectHashSet;

 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.DelegatingAnalyzerWrapper;
 import org.apache.lucene.document.FieldType;
 import org.elasticsearch.ElasticsearchGenerationException;
 import org.elasticsearch.Version;
 import org.elasticsearch.common.compress.CompressedXContent;

@@ -35,15 +34,12 @@ import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.analysis.AnalysisService;
 import org.elasticsearch.index.mapper.Mapper.BuilderContext;
 import org.elasticsearch.index.mapper.object.ObjectMapper;
-import org.elasticsearch.index.percolator.PercolatorFieldMapper;
 import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.index.similarity.SimilarityService;
 import org.elasticsearch.indices.InvalidTypeNameException;
 import org.elasticsearch.indices.TypeMissingException;
 import org.elasticsearch.indices.mapper.MapperRegistry;
 import org.elasticsearch.script.ScriptService;

 import java.io.Closeable;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;

@@ -97,6 +93,8 @@ public class MapperService extends AbstractIndexComponent {
             "_uid", "_id", "_type", "_all", "_parent", "_routing", "_index",
             "_size", "_timestamp", "_ttl"
     );
+    @Deprecated
+    public static final String PERCOLATOR_LEGACY_TYPE_NAME = ".percolator";

     private final AnalysisService analysisService;

@@ -269,7 +267,6 @@ public class MapperService extends AbstractIndexComponent {
             checkNestedFieldsLimit(fullPathObjectMappers);
             checkTotalFieldsLimit(objectMappers.size() + fieldMappers.size());
             checkDepthLimit(fullPathObjectMappers.keySet());
-            checkPercolatorFieldLimit(fieldTypes);
         }

         Set<String> parentTypes = this.parentTypes;

@@ -321,7 +318,7 @@ public class MapperService extends AbstractIndexComponent {
     private boolean typeNameStartsWithIllegalDot(DocumentMapper mapper) {
         boolean legacyIndex = getIndexSettings().getIndexVersionCreated().before(Version.V_5_0_0_alpha1);
         if (legacyIndex) {
-            return mapper.type().startsWith(".") && !PercolatorFieldMapper.LEGACY_TYPE_NAME.equals(mapper.type());
+            return mapper.type().startsWith(".") && !PERCOLATOR_LEGACY_TYPE_NAME.equals(mapper.type());
         } else {
             return mapper.type().startsWith(".");
         }

@@ -432,25 +429,6 @@ public class MapperService extends AbstractIndexComponent {
         }
     }

-    /**
-     * We only allow upto 1 percolator field per index.
-     *
-     * Reasoning here is that the PercolatorQueryCache only supports a single document having a percolator query.
-     * Also specifying multiple queries per document feels like an anti pattern
-     */
-    private void checkPercolatorFieldLimit(Iterable<MappedFieldType> fieldTypes) {
-        List<String> percolatorFieldTypes = new ArrayList<>();
-        for (MappedFieldType fieldType : fieldTypes) {
-            if (fieldType instanceof PercolatorFieldMapper.PercolatorFieldType) {
-                percolatorFieldTypes.add(fieldType.name());
-            }
-        }
-        if (percolatorFieldTypes.size() > 1) {
-            throw new IllegalArgumentException("Up to one percolator field type is allowed per index, " +
-                    "found the following percolator fields [" + percolatorFieldTypes + "]");
-        }
-    }
-
     public DocumentMapper parse(String mappingType, CompressedXContent mappingSource, boolean applyDefault) throws MapperParsingException {
         return documentParser.parse(mappingType, mappingSource, applyDefault ? defaultMappingSource : null);
     }
@@ -26,8 +26,6 @@ import org.apache.lucene.search.Query;
 import org.apache.lucene.store.ByteArrayDataOutput;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.ElasticsearchParseException;
-import org.elasticsearch.common.Base64;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.settings.Settings;

@@ -45,6 +43,7 @@ import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.index.query.QueryShardException;

 import java.io.IOException;
+import java.util.Base64;
 import java.util.List;
 import java.util.Map;

@@ -124,11 +123,7 @@ public class BinaryFieldMapper extends FieldMapper {
         } else if (value instanceof byte[]) {
             bytes = new BytesArray((byte[]) value);
         } else {
-            try {
-                bytes = new BytesArray(Base64.decode(value.toString()));
-            } catch (IOException e) {
-                throw new ElasticsearchParseException("failed to convert bytes", e);
-            }
+            bytes = new BytesArray(Base64.getDecoder().decode(value.toString()));
         }
         return bytes;
     }
@@ -85,7 +85,7 @@ public class IndexFieldMapper extends MetadataFieldMapper {
     public static class TypeParser implements MetadataFieldMapper.TypeParser {
         @Override
         public MetadataFieldMapper.Builder<?,?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
-            if (parserContext.indexVersionCreated().onOrAfter(Version.V_5_0_0)) {
+            if (parserContext.indexVersionCreated().onOrAfter(Version.V_5_0_0_alpha3)) {
                 throw new MapperParsingException(NAME + " is not configurable");
             }
             return new Builder(parserContext.mapperService().fullName(NAME));
@@ -1,294 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.index.percolator;
-
-import com.carrotsearch.hppc.IntObjectHashMap;
-import org.apache.lucene.index.BinaryDocValues;
-import org.apache.lucene.index.FieldInfo;
-import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.PostingsEnum;
-import org.apache.lucene.index.StoredFieldVisitor;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.Weight;
-import org.apache.lucene.util.BytesRef;
-import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.Version;
-import org.elasticsearch.common.bytes.BytesArray;
-import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.cache.Cache;
-import org.elasticsearch.common.cache.CacheBuilder;
-import org.elasticsearch.common.settings.Setting;
-import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.common.xcontent.XContent;
-import org.elasticsearch.common.xcontent.XContentHelper;
-import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.common.xcontent.XContentType;
-import org.elasticsearch.index.AbstractIndexComponent;
-import org.elasticsearch.index.IndexSettings;
-import org.elasticsearch.index.IndexWarmer;
-import org.elasticsearch.index.IndexWarmer.TerminationHandle;
-import org.elasticsearch.index.engine.Engine.Searcher;
-import org.elasticsearch.index.mapper.DocumentMapper;
-import org.elasticsearch.index.mapper.FieldMapper;
-import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.mapper.internal.SourceFieldMapper;
-import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
-import org.elasticsearch.index.query.PercolateQuery;
-import org.elasticsearch.index.query.QueryShardContext;
-import org.elasticsearch.index.shard.IndexShard;
-import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.index.shard.ShardUtils;
-import org.elasticsearch.threadpool.ThreadPool;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.Executor;
-import java.util.function.Supplier;
-
-import static org.elasticsearch.index.percolator.PercolatorFieldMapper.LEGACY_TYPE_NAME;
-import static org.elasticsearch.index.percolator.PercolatorFieldMapper.PercolatorFieldType;
-import static org.elasticsearch.index.percolator.PercolatorFieldMapper.parseQuery;
-
-public final class PercolatorQueryCache extends AbstractIndexComponent
-        implements Closeable, LeafReader.CoreClosedListener, PercolateQuery.QueryRegistry {
-
-    public final static Setting<Boolean> INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING =
-            Setting.boolSetting("index.percolator.map_unmapped_fields_as_string", false, Setting.Property.IndexScope);
-
-    public final static XContentType QUERY_BUILDER_CONTENT_TYPE = XContentType.SMILE;
-
-    private final Supplier<QueryShardContext> queryShardContextSupplier;
-    private final Cache<Object, QueriesLeaf> cache;
-    private final boolean mapUnmappedFieldsAsString;
-
-    public PercolatorQueryCache(IndexSettings indexSettings, Supplier<QueryShardContext> queryShardContextSupplier) {
-        super(indexSettings);
-        this.queryShardContextSupplier = queryShardContextSupplier;
-        cache = CacheBuilder.<Object, QueriesLeaf>builder().build();
-        this.mapUnmappedFieldsAsString = indexSettings.getValue(INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING);
-    }
-
-    @Override
-    public Leaf getQueries(LeafReaderContext ctx) {
-        QueriesLeaf percolatorQueries = cache.get(ctx.reader().getCoreCacheKey());
-        if (percolatorQueries == null) {
-            throw new IllegalStateException("queries not loaded, queries should be have been preloaded during index warming...");
-        }
-        return percolatorQueries;
-    }
-
-    public IndexWarmer.Listener createListener(ThreadPool threadPool) {
-        return new IndexWarmer.Listener() {
-
-            final Executor executor = threadPool.executor(ThreadPool.Names.WARMER);
-
-            @Override
-            public TerminationHandle warmReader(IndexShard indexShard, Searcher searcher) {
-                final CountDownLatch latch = new CountDownLatch(searcher.reader().leaves().size());
-                for (final LeafReaderContext ctx : searcher.reader().leaves()) {
-                    if (cache.get(ctx.reader().getCoreCacheKey()) != null) {
-                        latch.countDown();
-                        continue;
-                    }
-                    executor.execute(() -> {
-                        try {
-                            final long start = System.nanoTime();
-                            QueriesLeaf queries = loadQueries(ctx, indexShard);
-                            cache.put(ctx.reader().getCoreCacheKey(), queries);
-                            if (indexShard.warmerService().logger().isTraceEnabled()) {
-                                indexShard.warmerService().logger().trace(
-                                        "loading percolator queries took [{}]",
-                                        TimeValue.timeValueNanos(System.nanoTime() - start)
-                                );
-                            }
-                        } catch (Throwable t) {
-                            indexShard.warmerService().logger().warn("failed to load percolator queries", t);
-                        } finally {
-                            latch.countDown();
-                        }
-                    });
-                }
-                return () -> latch.await();
-            }
-        };
-    }
-
-    QueriesLeaf loadQueries(LeafReaderContext context, IndexShard indexShard) throws IOException {
-        Version indexVersionCreated = indexShard.indexSettings().getIndexVersionCreated();
-        MapperService mapperService = indexShard.mapperService();
-        LeafReader leafReader = context.reader();
-        ShardId shardId = ShardUtils.extractShardId(leafReader);
-        if (shardId == null) {
-            throw new IllegalStateException("can't resolve shard id");
-        }
-        if (indexSettings.getIndex().equals(shardId.getIndex()) == false) {
-            // percolator cache insanity
-            String message = "Trying to load queries for index " + shardId.getIndex() + " with cache of index " +
-                    indexSettings.getIndex();
-            throw new IllegalStateException(message);
-        }
-
-        IntObjectHashMap<Query> queries = new IntObjectHashMap<>();
-        boolean legacyLoading = indexVersionCreated.before(Version.V_5_0_0_alpha1);
-        if (legacyLoading) {
-            PostingsEnum postings = leafReader.postings(new Term(TypeFieldMapper.NAME, LEGACY_TYPE_NAME), PostingsEnum.NONE);
-            if (postings != null) {
-                LegacyQueryFieldVisitor visitor = new LegacyQueryFieldVisitor();
-                for (int docId = postings.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = postings.nextDoc()) {
-                    leafReader.document(docId, visitor);
-                    queries.put(docId, parseLegacyPercolatorDocument(docId, visitor.source));
-                    visitor.source = null; // reset
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Each type can have one percolator field mapper,
|
||||
// So for each type we check if there is a percolator field mapper
|
||||
// and parse all the queries for the documents of that type.
|
||||
IndexSearcher indexSearcher = new IndexSearcher(leafReader);
|
||||
for (DocumentMapper documentMapper : mapperService.docMappers(false)) {
|
||||
Weight queryWeight = indexSearcher.createNormalizedWeight(documentMapper.typeFilter(), false);
|
||||
for (FieldMapper fieldMapper : documentMapper.mappers()) {
|
||||
if (fieldMapper instanceof PercolatorFieldMapper) {
|
||||
PercolatorFieldType fieldType = (PercolatorFieldType) fieldMapper.fieldType();
|
||||
BinaryDocValues binaryDocValues = leafReader.getBinaryDocValues(fieldType.getQueryBuilderFieldName());
|
||||
if (binaryDocValues != null) {
|
||||
// use the same leaf reader context the indexSearcher is using too:
|
||||
Scorer scorer = queryWeight.scorer(leafReader.getContext());
|
||||
if (scorer != null) {
|
||||
DocIdSetIterator iterator = scorer.iterator();
|
||||
for (int docId = iterator.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = iterator.nextDoc()) {
|
||||
BytesRef qbSource = binaryDocValues.get(docId);
|
||||
if (qbSource.length > 0) {
|
||||
queries.put(docId, parseQueryBuilder(docId, qbSource));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
leafReader.addCoreClosedListener(this);
|
||||
return new QueriesLeaf(shardId, queries);
|
||||
}
|
||||
|
||||
private Query parseQueryBuilder(int docId, BytesRef qbSource) {
|
||||
XContent xContent = QUERY_BUILDER_CONTENT_TYPE.xContent();
|
||||
try (XContentParser sourceParser = xContent.createParser(qbSource.bytes, qbSource.offset, qbSource.length)) {
|
||||
QueryShardContext context = queryShardContextSupplier.get();
|
||||
return parseQuery(context, mapUnmappedFieldsAsString, sourceParser);
|
||||
} catch (IOException e) {
|
||||
throw new PercolatorException(index(), "failed to parse query builder for document [" + docId + "]", e);
|
||||
}
|
||||
}
|
||||
|
||||
private Query parseLegacyPercolatorDocument(int docId, BytesReference source) {
|
||||
try (XContentParser sourceParser = XContentHelper.createParser(source)) {
|
||||
String currentFieldName = null;
|
||||
XContentParser.Token token = sourceParser.nextToken(); // move the START_OBJECT
|
||||
if (token != XContentParser.Token.START_OBJECT) {
|
||||
throw new ElasticsearchException("failed to parse query [" + docId + "], not starting with OBJECT");
|
||||
}
|
||||
while ((token = sourceParser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
currentFieldName = sourceParser.currentName();
|
||||
} else if (token == XContentParser.Token.START_OBJECT) {
|
||||
if ("query".equals(currentFieldName)) {
|
||||
QueryShardContext context = queryShardContextSupplier.get();
|
||||
return parseQuery(context, mapUnmappedFieldsAsString, sourceParser);
|
||||
} else {
|
||||
sourceParser.skipChildren();
|
||||
}
|
||||
} else if (token == XContentParser.Token.START_ARRAY) {
|
||||
sourceParser.skipChildren();
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
throw new PercolatorException(index(), "failed to parse query [" + docId + "]", e);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
public PercolatorQueryCacheStats getStats(ShardId shardId) {
|
||||
int numberOfQueries = 0;
|
||||
for (QueriesLeaf queries : cache.values()) {
|
||||
if (shardId.equals(queries.shardId)) {
|
||||
numberOfQueries += queries.queries.size();
|
||||
}
|
||||
}
|
||||
return new PercolatorQueryCacheStats(numberOfQueries);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onClose(Object cacheKey) throws IOException {
|
||||
cache.invalidate(cacheKey);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
cache.invalidateAll();
|
||||
}
|
||||
|
||||
final static class LegacyQueryFieldVisitor extends StoredFieldVisitor {
|
||||
|
||||
private BytesArray source;
|
||||
|
||||
@Override
|
||||
public void binaryField(FieldInfo fieldInfo, byte[] bytes) throws IOException {
|
||||
source = new BytesArray(bytes);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Status needsField(FieldInfo fieldInfo) throws IOException {
|
||||
if (source != null) {
|
||||
return Status.STOP;
|
||||
}
|
||||
if (SourceFieldMapper.NAME.equals(fieldInfo.name)) {
|
||||
return Status.YES;
|
||||
} else {
|
||||
return Status.NO;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
final static class QueriesLeaf implements Leaf {
|
||||
|
||||
final ShardId shardId;
|
||||
final IntObjectHashMap<Query> queries;
|
||||
|
||||
QueriesLeaf(ShardId shardId, IntObjectHashMap<Query> queries) {
|
||||
this.shardId = shardId;
|
||||
this.queries = queries;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Query getQuery(int docId) {
|
||||
return queries.get(docId);
|
||||
}
|
||||
}
|
||||
}
|
|
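The cache above is keyed by each segment's core cache key: entries are filled during index warming and invalidated through `LeafReader.CoreClosedListener` when a segment core closes. A minimal, dependency-free sketch of that lifecycle (the `PerSegmentCache` name and `loader` parameter are hypothetical, not Elasticsearch API):

--------------------------------------------------
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

// Hypothetical sketch of the per-segment caching pattern used above:
// an entry lives exactly as long as the segment core it was computed from.
final class PerSegmentCache<V> {
    private final Map<Object, V> cache = new ConcurrentHashMap<>();

    // warmer path: compute once per segment core, reuse afterwards
    V computeIfAbsent(Object coreCacheKey, Function<Object, V> loader) {
        return cache.computeIfAbsent(coreCacheKey, loader);
    }

    // close-listener path: drop the entry when the segment core goes away
    void onClose(Object coreCacheKey) {
        cache.remove(coreCacheKey);
    }
}
--------------------------------------------------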

@@ -1,89 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.index.percolator;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;

/**
 * Exposes percolator query cache statistics.
 */
public class PercolatorQueryCacheStats implements Streamable, ToXContent {

    private long numQueries;

    /**
     * Noop constructor for serialization purposes.
     */
    public PercolatorQueryCacheStats() {
    }

    PercolatorQueryCacheStats(long numQueries) {
        this.numQueries = numQueries;
    }

    /**
     * @return The total number of loaded percolate queries.
     */
    public long getNumQueries() {
        return numQueries;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject(Fields.PERCOLATOR);
        builder.field(Fields.QUERIES, getNumQueries());
        builder.endObject();
        return builder;
    }

    public void add(PercolatorQueryCacheStats percolate) {
        if (percolate == null) {
            return;
        }
        numQueries += percolate.getNumQueries();
    }

    static final class Fields {
        static final String PERCOLATOR = "percolator";
        static final String QUERIES = "num_queries";
    }

    public static PercolatorQueryCacheStats readPercolateStats(StreamInput in) throws IOException {
        PercolatorQueryCacheStats stats = new PercolatorQueryCacheStats();
        stats.readFrom(in);
        return stats;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        numQueries = in.readVLong();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeVLong(numQueries);
    }
}
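The deleted stats class round-trips its single counter through `writeVLong`/`readVLong`. As background, a sketch of the variable-length integer technique such helpers typically use: seven payload bits per byte, with the high bit marking that more bytes follow. This illustrates the general encoding, not the Elasticsearch implementation:

--------------------------------------------------
import java.io.ByteArrayOutputStream;

// Hedged sketch of a little-endian variable-length long encoding.
final class VLong {
    static byte[] write(long value) {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        while ((value & ~0x7FL) != 0) {
            out.write((int) ((value & 0x7F) | 0x80)); // 7 payload bits + continuation bit
            value >>>= 7;
        }
        out.write((int) value); // final byte has the high bit clear
        return out.toByteArray();
    }

    static long read(byte[] bytes) {
        long value = 0;
        int shift = 0;
        for (byte b : bytes) {
            value |= (long) (b & 0x7F) << shift;
            if ((b & 0x80) == 0) {
                break; // last byte of this value
            }
            shift += 7;
        }
        return value;
    }
}
--------------------------------------------------

Small values such as a typical query count fit in a single byte, which is why a `long` counter is cheaper on the wire than a fixed eight-byte write.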

@@ -340,7 +340,7 @@ public class GeoDistanceRangeQueryBuilder extends AbstractQueryBuilder<GeoDistan
                fromValue = geoDistance.normalize(fromValue, DistanceUnit.DEFAULT);
            }
        } else {
            fromValue = new Double(0);
            fromValue = 0.0;
        }

        if (to != null) {
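The change replaces the boxed `new Double(0)` with the literal `0.0`. A small illustrative snippet of the difference (the class name is hypothetical):

--------------------------------------------------
public class BoxingSketch {
    public static void main(String[] args) {
        double primitive = 0.0;          // what the new code assigns: no explicit boxing
        Double autoboxed = 0.0;          // sugar for Double.valueOf(0.0)
        Double explicit = new Double(0); // always allocates; deprecated since Java 9
        System.out.println(primitive + " " + autoboxed + " " + explicit);
    }
}
--------------------------------------------------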

@@ -267,7 +267,7 @@ public class MatchQueryBuilder extends AbstractQueryBuilder<MatchQueryBuilder> {
     */
    public MatchQueryBuilder prefixLength(int prefixLength) {
        if (prefixLength < 0) {
            throw new IllegalArgumentException("No negative prefix length allowed.");
            throw new IllegalArgumentException("[" + NAME + "] requires prefix length to be non-negative.");
        }
        this.prefixLength = prefixLength;
        return this;

@@ -284,8 +284,8 @@ public class MatchQueryBuilder extends AbstractQueryBuilder<MatchQueryBuilder> {
     * When using fuzzy or prefix type query, the number of term expansions to use.
     */
    public MatchQueryBuilder maxExpansions(int maxExpansions) {
        if (maxExpansions < 0) {
            throw new IllegalArgumentException("No negative maxExpansions allowed.");
        if (maxExpansions <= 0) {
            throw new IllegalArgumentException("[" + NAME + "] requires maxExpansions to be positive.");
        }
        this.maxExpansions = maxExpansions;
        return this;
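A usage sketch against the builder methods shown in these hunks; after the second hunk, `maxExpansions` rejects zero as well as negative values. This assumes the standard `QueryBuilders.matchQuery` factory of this era of the codebase:

--------------------------------------------------
import org.elasticsearch.index.query.MatchQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;

public class MatchValidationSketch {
    public static void main(String[] args) {
        MatchQueryBuilder builder = QueryBuilders.matchQuery("title", "elasticsearch")
                .prefixLength(0);  // still allowed: zero is non-negative
        builder.maxExpansions(50); // must now be strictly positive
        // builder.maxExpansions(0) would throw IllegalArgumentException after this change
    }
}
--------------------------------------------------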

@@ -839,24 +839,6 @@ public abstract class QueryBuilders {
        return new ExistsQueryBuilder(name);
    }

    public static PercolateQueryBuilder percolateQuery(String queryField, String documentType, BytesReference document) {
        return new PercolateQueryBuilder(queryField, documentType, document);
    }

    public static PercolateQueryBuilder percolateQuery(String queryField, String documentType, String indexedDocumentIndex,
                                                       String indexedDocumentType, String indexedDocumentId) {
        return new PercolateQueryBuilder(queryField, documentType, indexedDocumentIndex, indexedDocumentType, indexedDocumentId,
                null, null, null);
    }

    public static PercolateQueryBuilder percolateQuery(String queryField, String documentType, String indexedDocumentIndex,
                                                       String indexedDocumentType, String indexedDocumentId,
                                                       String indexedDocumentRouting, String indexedDocumentPreference,
                                                       Long indexedDocumentVersion) {
        return new PercolateQueryBuilder(queryField, documentType, indexedDocumentIndex, indexedDocumentType, indexedDocumentId,
                indexedDocumentRouting, indexedDocumentPreference, indexedDocumentVersion);
    }

    private QueryBuilders() {

    }

@@ -51,7 +51,6 @@ import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.core.TextFieldMapper;
import org.elasticsearch.index.mapper.object.ObjectMapper;
import org.elasticsearch.index.percolator.PercolatorQueryCache;
import org.elasticsearch.index.query.support.NestedScope;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;

@@ -82,7 +81,6 @@ public class QueryShardContext extends QueryRewriteContext {
    private final Map<String, Query> namedQueries = new HashMap<>();
    private final MapperQueryParser queryParser = new MapperQueryParser(this);
    private final IndicesQueriesRegistry indicesQueriesRegistry;
    private final PercolatorQueryCache percolatorQueryCache;
    private boolean allowUnmappedFields;
    private boolean mapUnmappedFieldAsString;
    private NestedScope nestedScope;

@@ -90,7 +88,7 @@ public class QueryShardContext extends QueryRewriteContext {

    public QueryShardContext(IndexSettings indexSettings, BitsetFilterCache bitsetFilterCache, IndexFieldDataService indexFieldDataService,
                             MapperService mapperService, SimilarityService similarityService, ScriptService scriptService,
                             final IndicesQueriesRegistry indicesQueriesRegistry, Client client, PercolatorQueryCache percolatorQueryCache,
                             final IndicesQueriesRegistry indicesQueriesRegistry, Client client,
                             IndexReader reader, ClusterState clusterState) {
        super(indexSettings, mapperService, scriptService, indicesQueriesRegistry, client, reader, clusterState);
        this.indexSettings = indexSettings;

@@ -100,14 +98,13 @@ public class QueryShardContext extends QueryRewriteContext {
        this.indexFieldDataService = indexFieldDataService;
        this.allowUnmappedFields = indexSettings.isDefaultAllowUnmappedFields();
        this.indicesQueriesRegistry = indicesQueriesRegistry;
        this.percolatorQueryCache = percolatorQueryCache;
        this.nestedScope = new NestedScope();
    }

    public QueryShardContext(QueryShardContext source) {
        this(source.indexSettings, source.bitsetFilterCache, source.indexFieldDataService, source.mapperService,
                source.similarityService, source.scriptService, source.indicesQueriesRegistry, source.client,
                source.percolatorQueryCache, source.reader, source.clusterState);
                source.reader, source.clusterState);
        this.types = source.getTypes();
    }

@@ -123,10 +120,6 @@ public class QueryShardContext extends QueryRewriteContext {
        return mapperService.analysisService();
    }

    public PercolatorQueryCache getPercolatorQueryCache() {
        return percolatorQueryCache;
    }

    public Similarity getSearchSimilarity() {
        return similarityService != null ? similarityService.similarity(mapperService) : null;
    }

@@ -178,7 +171,12 @@ public class QueryShardContext extends QueryRewriteContext {
        return isFilter;
    }

    void setIsFilter(boolean isFilter) {
    /**
     * Public for testing only!
     *
     * Sets whether we are currently parsing a filter or a query
     */
    public void setIsFilter(boolean isFilter) {
        this.isFilter = isFilter;
    }

@@ -31,6 +31,7 @@ import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Objects;

@@ -64,7 +65,7 @@ public class SpanNearQueryBuilder extends AbstractQueryBuilder<SpanNearQueryBuil
     */
    public SpanNearQueryBuilder(SpanQueryBuilder initialClause, int slop) {
        if (initialClause == null) {
            throw new IllegalArgumentException("query must include at least one clause");
            throw new IllegalArgumentException("[" + NAME + "] must include at least one clause");
        }
        this.clauses.add(initialClause);
        this.slop = slop;

@@ -96,9 +97,12 @@ public class SpanNearQueryBuilder extends AbstractQueryBuilder<SpanNearQueryBuil
        return this.slop;
    }

    public SpanNearQueryBuilder clause(SpanQueryBuilder clause) {
    /**
     * Add a span clause to the current list of clauses
     */
    public SpanNearQueryBuilder addClause(SpanQueryBuilder clause) {
        if (clause == null) {
            throw new IllegalArgumentException("query clauses cannot be null");
            throw new IllegalArgumentException("[" + NAME + "] clauses cannot be null");
        }
        clauses.add(clause);
        return this;

@@ -108,7 +112,7 @@ public class SpanNearQueryBuilder extends AbstractQueryBuilder<SpanNearQueryBuil
     * @return the {@link SpanQueryBuilder} clauses that were set for this query
     */
    public List<SpanQueryBuilder> clauses() {
        return this.clauses;
        return Collections.unmodifiableList(this.clauses);
    }

    /**

@@ -198,7 +202,7 @@ public class SpanNearQueryBuilder extends AbstractQueryBuilder<SpanNearQueryBuil
        SpanNearQueryBuilder queryBuilder = new SpanNearQueryBuilder(clauses.get(0), slop);
        for (int i = 1; i < clauses.size(); i++) {
            queryBuilder.clause(clauses.get(i));
            queryBuilder.addClause(clauses.get(i));
        }
        queryBuilder.inOrder(inOrder);
        queryBuilder.boost(boost);

@@ -31,6 +31,7 @@ import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Objects;

@@ -48,7 +49,7 @@ public class SpanOrQueryBuilder extends AbstractQueryBuilder<SpanOrQueryBuilder>

    public SpanOrQueryBuilder(SpanQueryBuilder initialClause) {
        if (initialClause == null) {
            throw new IllegalArgumentException("query must include at least one clause");
            throw new IllegalArgumentException("[" + NAME + "] must include at least one clause");
        }
        clauses.add(initialClause);
    }

@@ -68,9 +69,12 @@ public class SpanOrQueryBuilder extends AbstractQueryBuilder<SpanOrQueryBuilder>
        writeQueries(out, clauses);
    }

    public SpanOrQueryBuilder clause(SpanQueryBuilder clause) {
    /**
     * Add a span clause to the current list of clauses
     */
    public SpanOrQueryBuilder addClause(SpanQueryBuilder clause) {
        if (clause == null) {
            throw new IllegalArgumentException("inner bool query clause cannot be null");
            throw new IllegalArgumentException("[" + NAME + "] inner clause cannot be null");
        }
        clauses.add(clause);
        return this;

@@ -80,7 +84,7 @@ public class SpanOrQueryBuilder extends AbstractQueryBuilder<SpanOrQueryBuilder>
     * @return the {@link SpanQueryBuilder} clauses that were set for this query
     */
    public List<SpanQueryBuilder> clauses() {
        return this.clauses;
        return Collections.unmodifiableList(this.clauses);
    }

    @Override

@@ -137,7 +141,7 @@ public class SpanOrQueryBuilder extends AbstractQueryBuilder<SpanOrQueryBuilder>
        SpanOrQueryBuilder queryBuilder = new SpanOrQueryBuilder(clauses.get(0));
        for (int i = 1; i < clauses.size(); i++) {
            queryBuilder.clause(clauses.get(i));
            queryBuilder.addClause(clauses.get(i));
        }
        queryBuilder.boost(boost);
        queryBuilder.queryName(queryName);
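Both span builders above receive the same two fixes: `clause(...)` is renamed to the clearer `addClause(...)`, and `clauses()` now hands out an unmodifiable view so callers cannot mutate builder state behind its back. A minimal sketch of that defensive-accessor pattern (hypothetical class, with `String` standing in for `SpanQueryBuilder`):

--------------------------------------------------
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

final class ClauseList {
    private final List<String> clauses = new ArrayList<>();

    // mutation goes through the builder itself
    ClauseList addClause(String clause) {
        clauses.add(clause);
        return this;
    }

    // read-only view: callers that try to mutate it get UnsupportedOperationException
    List<String> clauses() {
        return Collections.unmodifiableList(clauses);
    }
}
--------------------------------------------------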

@@ -576,16 +576,22 @@ public class IndexShard extends AbstractIndexShardComponent {
            long bytes = getEngine().getIndexBufferRAMBytesUsed();
            writingBytes.addAndGet(bytes);
            try {
                logger.debug("refresh with source [{}] indexBufferRAMBytesUsed [{}]", source, new ByteSizeValue(bytes));
                if (logger.isTraceEnabled()) {
                    logger.trace("refresh with source [{}] indexBufferRAMBytesUsed [{}]", source, new ByteSizeValue(bytes));
                }
                long time = System.nanoTime();
                getEngine().refresh(source);
                refreshMetric.inc(System.nanoTime() - time);
            } finally {
                logger.debug("remove [{}] writing bytes for shard [{}]", new ByteSizeValue(bytes), shardId());
                if (logger.isTraceEnabled()) {
                    logger.trace("remove [{}] writing bytes for shard [{}]", new ByteSizeValue(bytes), shardId());
                }
                writingBytes.addAndGet(-bytes);
            }
        } else {
            logger.debug("refresh with source [{}]", source);
            if (logger.isTraceEnabled()) {
                logger.trace("refresh with source [{}]", source);
            }
            long time = System.nanoTime();
            getEngine().refresh(source);
            refreshMetric.inc(System.nanoTime() - time);
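The hunk demotes these messages from debug to trace and wraps them in `isTraceEnabled()` guards, so the `ByteSizeValue` argument is only built when tracing is actually on. The same guard pattern with the JDK logger, using illustrative names only:

--------------------------------------------------
import java.util.logging.Level;
import java.util.logging.Logger;

public class GuardedLoggingSketch {
    private static final Logger LOGGER = Logger.getLogger(GuardedLoggingSketch.class.getName());

    // stand-in for the ByteSizeValue formatting the guarded calls above avoid
    static String prettySize(long bytes) {
        return (bytes >> 10) + "kb";
    }

    public static void main(String[] args) {
        long bytes = 1 << 20;
        // the guard skips both the log call and the argument construction
        if (LOGGER.isLoggable(Level.FINEST)) {
            LOGGER.finest("refresh used " + prettySize(bytes));
        }
    }
}
--------------------------------------------------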

@@ -39,7 +39,6 @@ import java.nio.file.OpenOption;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReentrantLock;

public class TranslogWriter extends BaseTranslogReader implements Closeable {

@@ -154,7 +153,9 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable {
    /**
     * returns true if there are buffered ops
     */
    public boolean syncNeeded() { return totalOffset != lastSyncedOffset; }
    public boolean syncNeeded() {
        return totalOffset != lastSyncedOffset;
    }

    @Override
    public int totalOperations() {

@@ -169,40 +170,55 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable {
    /**
     * closes this writer and transfers its underlying file channel to a new immutable reader
     */
    public synchronized TranslogReader closeIntoReader() throws IOException {
        try {
            sync(); // sync before we close..
        } catch (IOException e) {
            closeWithTragicEvent(e);
            throw e;
        }
        if (closed.compareAndSet(false, true)) {
            boolean success = false;
            try {
                final TranslogReader reader = new TranslogReader(generation, channel, path, firstOperationOffset, getWrittenOffset(), operationCounter);
                success = true;
                return reader;
            } finally {
                if (success == false) {
                    // close the channel, as we are closed and failed to create a new reader
                    IOUtils.closeWhileHandlingException(channel);
                }
            }
        } else {
            throw new AlreadyClosedException("translog [" + getGeneration() + "] is already closed (path [" + path + "]", tragedy);
        }
    }
    public TranslogReader closeIntoReader() throws IOException {
        // make sure to acquire the sync lock first, to prevent deadlocks with threads calling
        // syncUpTo(), where the sync lock is acquired first, followed by synchronized(this)
        //
        // Note: while this is not strictly needed, as this method is called while blocking all ops on the translog,
        // we do it for correctness and to prevent future issues.
        synchronized (syncLock) {
            synchronized (this) {
                try {
                    sync(); // sync before we close..
                } catch (IOException e) {
                    closeWithTragicEvent(e);
                    throw e;
                }
                if (closed.compareAndSet(false, true)) {
                    boolean success = false;
                    try {
                        final TranslogReader reader = new TranslogReader(generation, channel, path, firstOperationOffset, getWrittenOffset(), operationCounter);
                        success = true;
                        return reader;
                    } finally {
                        if (success == false) {
                            // close the channel, as we are closed and failed to create a new reader
                            IOUtils.closeWhileHandlingException(channel);
                        }
                    }
                } else {
                    throw new AlreadyClosedException("translog [" + getGeneration() + "] is already closed (path [" + path + "]", tragedy);
                }
            }
        }
    }

    @Override
    public synchronized Translog.Snapshot newSnapshot() {
        ensureOpen();
        try {
            sync();
        } catch (IOException e) {
            throw new TranslogException(shardId, "exception while syncing before creating a snapshot", e);
        }
        return super.newSnapshot();
    }
    public Translog.Snapshot newSnapshot() {
        // make sure to acquire the sync lock first, to prevent deadlocks with threads calling
        // syncUpTo(), where the sync lock is acquired first, followed by synchronized(this)
        synchronized (syncLock) {
            synchronized (this) {
                ensureOpen();
                try {
                    sync();
                } catch (IOException e) {
                    throw new TranslogException(shardId, "exception while syncing before creating a snapshot", e);
                }
                return super.newSnapshot();
            }
        }
    }

    private long getWrittenOffset() throws IOException {
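Both rewritten methods obey the lock-ordering rule spelled out in their comments: any path that needs both locks takes `syncLock` first and the object monitor second, so no two threads can ever hold the pair in opposite orders. A stripped-down sketch of the rule (hypothetical class, not the translog code):

--------------------------------------------------
final class OrderedLocks {
    private final Object syncLock = new Object();

    void syncUpTo() {
        synchronized (syncLock) {    // lock 1
            synchronized (this) {    // lock 2
                // flush buffered operations to disk
            }
        }
    }

    void closeIntoReader() {
        synchronized (syncLock) {    // same order: lock 1 ...
            synchronized (this) {    // ... then lock 2, so no deadlock with syncUpTo()
                // sync, then hand the channel over to a reader
            }
        }
    }
}
--------------------------------------------------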

@@ -34,7 +34,6 @@ import org.elasticsearch.index.mapper.core.KeywordFieldMapper;
import org.elasticsearch.index.mapper.core.StringFieldMapper;
import org.elasticsearch.index.mapper.core.TextFieldMapper;
import org.elasticsearch.index.mapper.core.TokenCountFieldMapper;
import org.elasticsearch.index.mapper.core.LegacyTokenCountFieldMapper;
import org.elasticsearch.index.mapper.core.NumberFieldMapper;
import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper;
import org.elasticsearch.index.mapper.geo.GeoShapeFieldMapper;

@@ -52,7 +51,6 @@ import org.elasticsearch.index.mapper.internal.UidFieldMapper;
import org.elasticsearch.index.mapper.internal.VersionFieldMapper;
import org.elasticsearch.index.mapper.ip.IpFieldMapper;
import org.elasticsearch.index.mapper.object.ObjectMapper;
import org.elasticsearch.index.percolator.PercolatorFieldMapper;
import org.elasticsearch.indices.cluster.IndicesClusterStateService;
import org.elasticsearch.indices.flush.SyncedFlushService;
import org.elasticsearch.indices.mapper.MapperRegistry;

@@ -98,7 +96,6 @@ public class IndicesModule extends AbstractModule {
        registerMapper(ObjectMapper.NESTED_CONTENT_TYPE, new ObjectMapper.TypeParser());
        registerMapper(CompletionFieldMapper.CONTENT_TYPE, new CompletionFieldMapper.TypeParser());
        registerMapper(GeoPointFieldMapper.CONTENT_TYPE, new GeoPointFieldMapper.TypeParser());
        registerMapper(PercolatorFieldMapper.CONTENT_TYPE, new PercolatorFieldMapper.TypeParser());

        if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) {
            registerMapper(GeoShapeFieldMapper.CONTENT_TYPE, new GeoShapeFieldMapper.TypeParser());

@@ -280,7 +280,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
                if (indexShard.routingEntry() == null) {
                    continue;
                }
                IndexShardStats indexShardStats = new IndexShardStats(indexShard.shardId(), new ShardStats[] { new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesQueryCache, indexService.cache().getPercolatorQueryCache(), indexShard, flags), indexShard.commitStats()) });
                IndexShardStats indexShardStats = new IndexShardStats(indexShard.shardId(), new ShardStats[] { new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesQueryCache, indexShard, flags), indexShard.commitStats()) });
                if (!statsByShard.containsKey(indexService.index())) {
                    statsByShard.put(indexService.index(), arrayAsArrayList(indexShardStats));
                } else {

@@ -37,7 +37,6 @@ import org.elasticsearch.index.flush.FlushStats;
import org.elasticsearch.index.get.GetStats;
import org.elasticsearch.index.shard.IndexingStats;
import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.percolator.PercolatorQueryCacheStats;
import org.elasticsearch.index.recovery.RecoveryStats;
import org.elasticsearch.index.refresh.RefreshStats;
import org.elasticsearch.index.search.stats.SearchStats;

@@ -102,11 +101,6 @@ public class NodeIndicesStats implements Streamable, ToXContent {
        return stats.getSearch();
    }

    @Nullable
    public PercolatorQueryCacheStats getPercolate() {
        return stats.getPercolatorCache();
    }

    @Nullable
    public MergeStats getMerge() {
        return stats.getMerge();

@@ -20,14 +20,13 @@

package org.elasticsearch.ingest.core;

import org.elasticsearch.common.util.iterable.Iterables;
import org.elasticsearch.ElasticsearchException;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Collectors;

/**

@@ -94,30 +93,38 @@ public class CompoundProcessor implements Processor {
            try {
                processor.execute(ingestDocument);
            } catch (Exception e) {
                ElasticsearchException compoundProcessorException = newCompoundProcessorException(e, processor.getType(), processor.getTag());
                if (onFailureProcessors.isEmpty()) {
                    throw e;
                    throw compoundProcessorException;
                } else {
                    executeOnFailure(ingestDocument, e, processor.getType(), processor.getTag());
                    executeOnFailure(ingestDocument, compoundProcessorException);
                }
                break;
            }
        }
    }

    void executeOnFailure(IngestDocument ingestDocument, Exception cause, String failedProcessorType, String failedProcessorTag) throws Exception {
    void executeOnFailure(IngestDocument ingestDocument, ElasticsearchException exception) throws Exception {
        try {
            putFailureMetadata(ingestDocument, cause, failedProcessorType, failedProcessorTag);
            putFailureMetadata(ingestDocument, exception);
            for (Processor processor : onFailureProcessors) {
                processor.execute(ingestDocument);
                try {
                    processor.execute(ingestDocument);
                } catch (Exception e) {
                    throw newCompoundProcessorException(e, processor.getType(), processor.getTag());
                }
            }
        } finally {
            removeFailureMetadata(ingestDocument);
        }
    }

    private void putFailureMetadata(IngestDocument ingestDocument, Exception cause, String failedProcessorType, String failedProcessorTag) {
    private void putFailureMetadata(IngestDocument ingestDocument, ElasticsearchException cause) {
        List<String> processorTypeHeader = cause.getHeader("processor_type");
        List<String> processorTagHeader = cause.getHeader("processor_tag");
        String failedProcessorType = (processorTypeHeader != null) ? processorTypeHeader.get(0) : null;
        String failedProcessorTag = (processorTagHeader != null) ? processorTagHeader.get(0) : null;
        Map<String, String> ingestMetadata = ingestDocument.getIngestMetadata();
        ingestMetadata.put(ON_FAILURE_MESSAGE_FIELD, cause.getMessage());
        ingestMetadata.put(ON_FAILURE_MESSAGE_FIELD, cause.getRootCause().getMessage());
        ingestMetadata.put(ON_FAILURE_PROCESSOR_TYPE_FIELD, failedProcessorType);
        ingestMetadata.put(ON_FAILURE_PROCESSOR_TAG_FIELD, failedProcessorTag);
    }

@@ -128,4 +135,21 @@ public class CompoundProcessor implements Processor {
        ingestMetadata.remove(ON_FAILURE_PROCESSOR_TYPE_FIELD);
        ingestMetadata.remove(ON_FAILURE_PROCESSOR_TAG_FIELD);
    }

    private ElasticsearchException newCompoundProcessorException(Exception e, String processorType, String processorTag) {
        if (e instanceof ElasticsearchException && ((ElasticsearchException) e).getHeader("processor_type") != null) {
            return (ElasticsearchException) e;
        }

        ElasticsearchException exception = new ElasticsearchException(new IllegalArgumentException(e));

        if (processorType != null) {
            exception.addHeader("processor_type", processorType);
        }
        if (processorTag != null) {
            exception.addHeader("processor_tag", processorTag);
        }

        return exception;
    }
}
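The refactoring moves the failing processor's type and tag out of method parameters and into headers on the exception itself, attached exactly once at the innermost failure, so outer layers detect an already-tagged exception instead of re-wrapping it. A self-contained sketch of that idea with a hypothetical exception type:

--------------------------------------------------
import java.util.HashMap;
import java.util.Map;

// Hypothetical stand-in for an exception that can carry metadata headers.
class TaggedException extends RuntimeException {
    final Map<String, String> headers = new HashMap<>();

    TaggedException(Throwable cause) {
        super(cause);
    }
}

final class Wrapping {
    static TaggedException wrap(Exception e, String type, String tag) {
        if (e instanceof TaggedException && ((TaggedException) e).headers.containsKey("processor_type")) {
            return (TaggedException) e; // already tagged at the innermost failure
        }
        TaggedException wrapped = new TaggedException(e);
        if (type != null) {
            wrapped.headers.put("processor_type", type);
        }
        if (tag != null) {
            wrapped.headers.put("processor_tag", tag);
        }
        return wrapped;
    }
}
--------------------------------------------------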

@@ -19,7 +19,6 @@

package org.elasticsearch.ingest.core;

import org.elasticsearch.common.Base64;
import org.elasticsearch.common.Strings;
import org.elasticsearch.index.mapper.internal.IdFieldMapper;
import org.elasticsearch.index.mapper.internal.IndexFieldMapper;

@@ -30,11 +29,11 @@ import org.elasticsearch.index.mapper.internal.TTLFieldMapper;
import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
import org.elasticsearch.index.mapper.internal.TypeFieldMapper;

import java.io.IOException;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Base64;
import java.util.Date;
import java.util.HashMap;
import java.util.List;

@@ -43,8 +42,6 @@ import java.util.Map;
import java.util.Objects;
import java.util.TimeZone;

import static java.nio.charset.StandardCharsets.UTF_8;

/**
 * Represents a single document being captured before indexing and holds the source and metadata (like id, type and index).
 */

@@ -144,11 +141,7 @@ public final class IngestDocument {
        if (object instanceof byte[]) {
            return (byte[]) object;
        } else if (object instanceof String) {
            try {
                return Base64.decode(object.toString().getBytes(UTF_8));
            } catch (IOException e) {
                throw new IllegalArgumentException("Could not base64 decode path [" + path + "]", e);
            }
            return Base64.getDecoder().decode(object.toString());
        } else {
            throw new IllegalArgumentException("Content field [" + path + "] of unknown type [" + object.getClass().getName() +
                    "], must be string or byte array");

@@ -464,7 +457,6 @@ public final class IngestDocument {

    private static void appendValues(List<Object> list, Object value) {
        if (value instanceof List) {
            @SuppressWarnings("unchecked")
            List<?> valueList = (List<?>) value;
            valueList.stream().forEach(list::add);
        } else {
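The decode path above swaps Elasticsearch's checked-exception `Base64` helper for the JDK's `java.util.Base64`, whose decoder throws an unchecked `IllegalArgumentException` on malformed input, which is why the try/catch disappears. For example:

--------------------------------------------------
import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class Base64Sketch {
    public static void main(String[] args) {
        // no checked IOException to handle, unlike the removed helper
        byte[] decoded = Base64.getDecoder().decode("aGVsbG8=");
        System.out.println(new String(decoded, StandardCharsets.UTF_8)); // prints: hello
    }
}
--------------------------------------------------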
Some files were not shown because too many files have changed in this diff.