Merge branch 'master' into rolling_upgrades

Ryan Ernst 2016-09-06 15:52:03 -07:00
commit e6ac27fcfa
631 changed files with 8101 additions and 4692 deletions

.gitignore

@@ -20,6 +20,11 @@ nbactions.xml
 .gradle/
 build/
 
+# gradle wrapper
+/gradle/
+gradlew
+gradlew.bat
+
 # maven stuff (to be removed when trunk becomes 4.x)
 *-execution-hints.log
 target/

DocsTestPlugin.groovy

@@ -18,6 +18,7 @@
  */
 package org.elasticsearch.gradle.doc
 
+import org.elasticsearch.gradle.VersionProperties
 import org.elasticsearch.gradle.test.RestTestPlugin
 import org.gradle.api.Project
 import org.gradle.api.Task
@@ -30,9 +31,19 @@ public class DocsTestPlugin extends RestTestPlugin {
     @Override
     public void apply(Project project) {
         super.apply(project)
+        Map<String, String> defaultSubstitutions = [
+            /* These match up with the asciidoc syntax for substitutions but
+             * the values may differ. In particular {version} needs to resolve
+             * to the version being built for testing but needs to resolve to
+             * the last released version for docs. */
+            '\\{version\\}':
+                VersionProperties.elasticsearch.replace('-SNAPSHOT', ''),
+            '\\{lucene_version\\}' : VersionProperties.lucene,
+        ]
         Task listSnippets = project.tasks.create('listSnippets', SnippetsTask)
         listSnippets.group 'Docs'
         listSnippets.description 'List each snippet'
+        listSnippets.defaultSubstitutions = defaultSubstitutions
         listSnippets.perSnippet { println(it.toString()) }
 
         Task listConsoleCandidates = project.tasks.create(
@@ -40,6 +51,7 @@ public class DocsTestPlugin extends RestTestPlugin {
         listConsoleCandidates.group 'Docs'
         listConsoleCandidates.description
             'List snippets that probably should be marked // CONSOLE'
+        listConsoleCandidates.defaultSubstitutions = defaultSubstitutions
         listConsoleCandidates.perSnippet {
             if (
                 it.console != null // Already marked, nothing to do
@@ -47,19 +59,17 @@ public class DocsTestPlugin extends RestTestPlugin {
             ) {
                 return
             }
-            List<String> languages = [
-                // This language should almost always be marked console
-                'js',
-                // These are often curl commands that should be converted but
-                // are probably false positives
-                'sh', 'shell',
-            ]
-            if (false == languages.contains(it.language)) {
-                return
-            }
-            println(it.toString())
+            if ( // js almost always should be `// CONSOLE`
+                    it.language == 'js' ||
+                    // snippets containing `curl` *probably* should
+                    // be `// CONSOLE`
+                    it.curl) {
+                println(it.toString())
+            }
         }
-        project.tasks.create('buildRestTests', RestTestsFromSnippetsTask)
+        Task buildRestTests = project.tasks.create(
+                'buildRestTests', RestTestsFromSnippetsTask)
+        buildRestTests.defaultSubstitutions = defaultSubstitutions
     }
 }
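Reviewer note: the substitution keys above are regular expressions, which is why the literal braces in {version} must be escaped — an unescaped {version} would be read as a repetition quantifier. A minimal Java sketch of how such a map behaves when applied with replaceAll; the version strings here are made-up placeholders, not the real build values:

import java.util.LinkedHashMap;
import java.util.Map;

public class SubstitutionDemo {
    public static void main(String[] args) {
        // Keys are regexes, so literal braces are escaped.
        Map<String, String> defaultSubstitutions = new LinkedHashMap<>();
        defaultSubstitutions.put("\\{version\\}", "6.0.0");        // hypothetical built version
        defaultSubstitutions.put("\\{lucene_version\\}", "7.0.0"); // hypothetical Lucene version

        String snippet = "GET /_analyze?version={version}";
        for (Map.Entry<String, String> e : defaultSubstitutions.entrySet()) {
            snippet = snippet.replaceAll(e.getKey(), e.getValue());
        }
        System.out.println(snippet); // GET /_analyze?version=6.0.0
    }
}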

RestTestsFromSnippetsTask.groovy

@@ -146,6 +146,9 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
     void emitDo(String method, String pathAndQuery, String body,
             String catchPart, List warnings, boolean inSetup) {
         def (String path, String query) = pathAndQuery.tokenize('?')
+        if (path == null) {
+            path = '' // Catch requests to the root...
+        }
         current.println("  - do:")
         if (catchPart != null) {
             current.println("      catch: $catchPart")
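Reviewer note: the new null check matters because Groovy's tokenize is backed by StringTokenizer and never yields empty tokens, so an empty pathAndQuery (a request to the root) destructures to a null path. A small Java sketch mirroring that behavior:

import java.util.ArrayList;
import java.util.List;
import java.util.StringTokenizer;

public class TokenizeDemo {
    // Mirrors Groovy's String.tokenize('?'): StringTokenizer semantics,
    // so empty tokens are never produced.
    static List<String> tokenize(String s, String delim) {
        List<String> tokens = new ArrayList<>();
        StringTokenizer st = new StringTokenizer(s, delim);
        while (st.hasMoreTokens()) {
            tokens.add(st.nextToken());
        }
        return tokens;
    }

    public static void main(String[] args) {
        // A normal snippet request: path and query both present.
        System.out.println(tokenize("_search?pretty", "?")); // [_search, pretty]
        // A request to the root: no tokens at all, so Groovy's
        // destructuring assigns null to `path` — hence the new guard.
        System.out.println(tokenize("", "?"));               // []
    }
}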

SnippetsTask.groovy

@@ -22,6 +22,7 @@ package org.elasticsearch.gradle.doc
 import org.gradle.api.DefaultTask
 import org.gradle.api.InvalidUserDataException
 import org.gradle.api.file.ConfigurableFileTree
+import org.gradle.api.tasks.Input
 import org.gradle.api.tasks.InputFiles
 import org.gradle.api.tasks.TaskAction
 
@@ -60,6 +61,12 @@ public class SnippetsTask extends DefaultTask {
         exclude 'build'
     }
 
+    /**
+     * Substitutions done on every snippet's contents.
+     */
+    @Input
+    Map<String, String> defaultSubstitutions = [:]
+
     @TaskAction
     public void executeTask() {
         /*
@@ -75,21 +82,39 @@
             Closure emit = {
                 snippet.contents = contents.toString()
                 contents = null
+                Closure doSubstitution = { String pattern, String subst ->
+                    /*
+                     * $body is really common but it looks like a
+                     * backreference so we just escape it here to make the
+                     * tests cleaner.
+                     */
+                    subst = subst.replace('$body', '\\$body')
+                    // \n is a new line....
+                    subst = subst.replace('\\n', '\n')
+                    snippet.contents = snippet.contents.replaceAll(
+                        pattern, subst)
+                }
+                defaultSubstitutions.each doSubstitution
                 if (substitutions != null) {
-                    substitutions.each { String pattern, String subst ->
-                        /*
-                         * $body is really common but it looks like a
-                         * backreference so we just escape it here to make the
-                         * tests cleaner.
-                         */
-                        subst = subst.replace('$body', '\\$body')
-                        // \n is a new line....
-                        subst = subst.replace('\\n', '\n')
-                        snippet.contents = snippet.contents.replaceAll(
-                                pattern, subst)
-                    }
+                    substitutions.each doSubstitution
                     substitutions = null
                 }
+                if (snippet.language == null) {
+                    throw new InvalidUserDataException("$snippet: "
+                        + "Snippet missing a language. This is required by "
+                        + "Elasticsearch's doc testing infrastructure so we "
+                        + "be sure we don't accidentally forget to test a "
+                        + "snippet.")
+                }
+                // Try to detect snippets that contain `curl`
+                if (snippet.language == 'sh' || snippet.language == 'shell') {
+                    snippet.curl = snippet.contents.contains('curl')
+                    if (snippet.console == false && snippet.curl == false) {
+                        throw new InvalidUserDataException("$snippet: "
+                            + "No need for NOTCONSOLE if snippet doesn't "
+                            + "contain `curl`.")
+                    }
+                }
                 perSnippet(snippet)
                 snippet = null
             }
@@ -107,7 +132,7 @@
                 }
                 return
             }
-            matcher = line =~ /\[source,(\w+)]\s*/
+            matcher = line =~ /\["?source"?,\s*"?(\w+)"?(,.*)?].*/
             if (matcher.matches()) {
                 lastLanguage = matcher.group(1)
                 lastLanguageLine = lineNumber
@@ -250,6 +275,7 @@
         String language = null
         String catchPart = null
         String setup = null
+        boolean curl
         List warnings = new ArrayList()
 
         @Override
@@ -285,6 +311,9 @@
             if (testSetup) {
                 result += '// TESTSETUP'
             }
+            if (curl) {
+                result += '(curl)'
+            }
             return result
         }
     }
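Reviewer note: the relaxed source-line pattern now tolerates quotes around "source", whitespace after the comma, and trailing attributes. A quick Java check of what the old and new regexes each accept; the example lines are illustrative:

import java.util.regex.Pattern;

public class SourceLineDemo {
    public static void main(String[] args) {
        Pattern old = Pattern.compile("\\[source,(\\w+)]\\s*");
        Pattern relaxed = Pattern.compile("\\[\"?source\"?,\\s*\"?(\\w+)\"?(,.*)?].*");

        String[] lines = {
            "[source,js]",                             // matched by both
            "[source, js]",                            // space after the comma
            "[\"source\",\"js\",subs=\"attributes\"]", // quoted, extra attributes
        };
        for (String line : lines) {
            System.out.printf("%-40s old=%-5s relaxed=%s%n", line,
                old.matcher(line).matches(), relaxed.matcher(line).matches());
        }
    }
}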

PluginBuildPlugin.groovy

@@ -97,8 +97,8 @@ public class PluginBuildPlugin extends BuildPlugin {
             // with a full elasticsearch server that includes optional deps
             provided "org.locationtech.spatial4j:spatial4j:${project.versions.spatial4j}"
             provided "com.vividsolutions:jts:${project.versions.jts}"
-            provided "log4j:log4j:${project.versions.log4j}"
-            provided "log4j:apache-log4j-extras:${project.versions.log4j}"
+            provided "org.apache.logging.log4j:log4j-api:${project.versions.log4j}"
+            provided "org.apache.logging.log4j:log4j-core:${project.versions.log4j}"
             provided "net.java.dev.jna:jna:${project.versions.jna}"
         }
     }

PrecommitTasks.groovy

@@ -59,7 +59,8 @@ class PrecommitTasks {
          * use the NamingConventionsCheck we break the circular dependency
          * here.
          */
-        precommitTasks.add(configureLoggerUsage(project))
+        // https://github.com/elastic/elasticsearch/issues/20243
+        // precommitTasks.add(configureLoggerUsage(project))
     }

checkstyle_suppressions.xml

@@ -10,6 +10,9 @@
   <suppress files="org[/\\]elasticsearch[/\\]painless[/\\]antlr[/\\]PainlessLexer\.java" checks="." />
   <suppress files="org[/\\]elasticsearch[/\\]painless[/\\]antlr[/\\]PainlessParser(|BaseVisitor|Visitor)\.java" checks="." />
+  <!-- ThrowableProxy is a forked copy from Log4j to hack around a bug; this can be removed when the hack is removed -->
+  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]apache[/\\]logging[/\\]log4j[/\\]core[/\\]impl[/\\]ThrowableProxy.java" checks="RegexpSinglelineJava" />
   <!-- Hopefully temporary suppression of LineLength on files that don't pass it. We should remove these when we the
        files start to pass. -->
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]apache[/\\]lucene[/\\]queries[/\\]BlendedTermQuery.java" checks="LineLength" />
@@ -21,7 +24,6 @@
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]health[/\\]ClusterHealthRequestBuilder.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]health[/\\]TransportClusterHealthAction.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]node[/\\]hotthreads[/\\]NodesHotThreadsRequestBuilder.java" checks="LineLength" />
-  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]node[/\\]info[/\\]NodeInfo.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]node[/\\]stats[/\\]NodesStatsRequestBuilder.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]repositories[/\\]delete[/\\]DeleteRepositoryRequestBuilder.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]repositories[/\\]delete[/\\]TransportDeleteRepositoryAction.java" checks="LineLength" />
@@ -301,7 +303,6 @@
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]network[/\\]NetworkModule.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]network[/\\]NetworkService.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]recycler[/\\]Recyclers.java" checks="LineLength" />
-  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]unit[/\\]ByteSizeValue.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]BigArrays.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]CancellableThreads.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]CollectionUtils.java" checks="LineLength" />
@@ -386,7 +387,6 @@
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]MapperService.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]Mapping.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]MetadataFieldMapper.java" checks="LineLength" />
-  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]ParsedDocument.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]CompletionFieldMapper.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]LegacyDateFieldMapper.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]LegacyDoubleFieldMapper.java" checks="LineLength" />
@@ -469,9 +469,7 @@
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]jvm[/\\]JvmStats.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]node[/\\]Node.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]node[/\\]internal[/\\]InternalSettingsPreparer.java" checks="LineLength" />
-  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]DummyPluginInfo.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]PluginsService.java" checks="LineLength" />
-  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]RemovePluginCommand.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]RepositoriesService.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]Repository.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]RepositoryModule.java" checks="LineLength" />
@@ -627,7 +625,7 @@
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]SimulatePipelineResponseTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]WriteableIngestDocumentTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]SearchRequestBuilderTests.java" checks="LineLength" />
-  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]AutoCreateIndexTests.java" checks="LineLength" />
+  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]IndicesOptionsTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]IndicesOptionsTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]TransportActionFilterChainTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]WaitActiveShardCountIT.java" checks="LineLength" />
@@ -865,7 +863,6 @@
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]CorruptedFileIT.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]CorruptedTranslogIT.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]DirectoryUtilsTests.java" checks="LineLength" />
-  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]ExceptionRetryIT.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]IndexStoreTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]StoreTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]suggest[/\\]stats[/\\]SuggestStatsIT.java" checks="LineLength" />
@@ -905,8 +902,6 @@
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]template[/\\]SimpleIndexTemplateIT.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]mget[/\\]SimpleMgetIT.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]jvm[/\\]JvmGcMonitorServiceSettingsTests.java" checks="LineLength" />
-  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]os[/\\]OsProbeTests.java" checks="LineLength" />
-  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]nodesinfo[/\\]NodeInfoStreamingTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]PluginInfoTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]PluginsServiceTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]recovery[/\\]FullRollingRestartIT.java" checks="LineLength" />
@@ -994,7 +989,6 @@
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]ESBlobStoreRepositoryIntegTestCase.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]geo[/\\]RandomShapeGenerator.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]hamcrest[/\\]ElasticsearchGeoAssertions.java" checks="LineLength" />
-  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]threadpool[/\\]ThreadPoolSerializationTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]timestamp[/\\]SimpleTimestampIT.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]tribe[/\\]TribeIT.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]ttl[/\\]SimpleTTLIT.java" checks="LineLength" />

version.properties

@@ -6,7 +6,7 @@ spatial4j = 0.6
 jts = 1.13
 jackson = 2.8.1
 snakeyaml = 1.15
-log4j = 1.2.17
+log4j = 2.6.2
 slf4j = 1.6.2
 jna = 4.2.2
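Reviewer note: the bump from log4j 1.2.17 to 2.6.2 is a major-version jump that changes the API along with the artifact coordinates (log4j:log4j becomes org.apache.logging.log4j:log4j-api plus log4j-core, as the build files below reflect). A minimal sketch of the Log4j 2 calling convention:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class Log4j2Demo {
    // Log4j 1: Logger.getLogger(Log4j2Demo.class)
    // Log4j 2: LogManager.getLogger(...)
    private static final Logger logger = LogManager.getLogger(Log4j2Demo.class);

    public static void main(String[] args) {
        // Log4j 2 supports {} parameter placeholders natively, so no
        // string concatenation is needed when the level is disabled.
        logger.info("upgraded from log4j {} to {}", "1.2.17", "2.6.2");
    }
}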

BulkBenchmarkTask.java

@@ -18,13 +18,13 @@
  */
 package org.elasticsearch.client.benchmark.ops.bulk;
 
+import org.apache.logging.log4j.Logger;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.client.benchmark.BenchmarkTask;
 import org.elasticsearch.client.benchmark.metrics.Sample;
 import org.elasticsearch.client.benchmark.metrics.SampleRecorder;
 import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.io.PathUtils;
-import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.logging.ESLoggerFactory;
 
 import java.io.BufferedReader;
@@ -135,7 +135,7 @@ public class BulkBenchmarkTask implements BenchmarkTask {
     private static final class BulkIndexer implements Runnable {
-        private static final ESLogger logger = ESLoggerFactory.getLogger(BulkIndexer.class.getName());
+        private static final Logger logger = ESLoggerFactory.getLogger(BulkIndexer.class.getName());
 
         private final BlockingQueue<List<String>> bulkData;
         private final int warmupIterations;

core/build.gradle

@@ -85,8 +85,10 @@ dependencies {
   compile "com.vividsolutions:jts:${versions.jts}", optional
 
   // logging
-  compile "log4j:log4j:${versions.log4j}", optional
-  compile "log4j:apache-log4j-extras:${versions.log4j}", optional
+  compile "org.apache.logging.log4j:log4j-api:${versions.log4j}", optional
+  compile "org.apache.logging.log4j:log4j-core:${versions.log4j}", optional
+  // to bridge dependencies that are still on Log4j 1 to Log4j 2
+  compile "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}", optional
 
   compile "net.java.dev.jna:jna:${versions.jna}"
@@ -154,32 +156,94 @@ thirdPartyAudit.excludes = [
   // classes are missing!
   // from com.fasterxml.jackson.dataformat.yaml.YAMLMapper (jackson-dataformat-yaml)
   'com.fasterxml.jackson.databind.ObjectMapper',
-  // from org.apache.log4j.receivers.net.JMSReceiver (log4j-extras)
-  'javax.jms.Message',
-  'javax.jms.MessageListener',
-  'javax.jms.ObjectMessage',
-  'javax.jms.TopicConnection',
-  'javax.jms.TopicConnectionFactory',
-  'javax.jms.TopicPublisher',
-  'javax.jms.TopicSession',
-  'javax.jms.TopicSubscriber',
-  // from org.apache.log4j.net.SMTPAppender (log4j)
-  'javax.mail.Authenticator',
-  'javax.mail.Message$RecipientType',
-  'javax.mail.Message',
-  'javax.mail.Multipart',
-  'javax.mail.PasswordAuthentication',
-  'javax.mail.Session',
-  'javax.mail.Transport',
-  'javax.mail.internet.InternetAddress',
-  'javax.mail.internet.InternetHeaders',
-  'javax.mail.internet.MimeBodyPart',
-  'javax.mail.internet.MimeMessage',
-  'javax.mail.internet.MimeMultipart',
-  'javax.mail.internet.MimeUtility',
+  // from log4j
+  'com.fasterxml.jackson.annotation.JsonInclude$Include',
+  'com.fasterxml.jackson.databind.DeserializationContext',
+  'com.fasterxml.jackson.databind.JsonMappingException',
+  'com.fasterxml.jackson.databind.JsonNode',
+  'com.fasterxml.jackson.databind.Module$SetupContext',
+  'com.fasterxml.jackson.databind.ObjectReader',
+  'com.fasterxml.jackson.databind.ObjectWriter',
+  'com.fasterxml.jackson.databind.SerializerProvider',
+  'com.fasterxml.jackson.databind.deser.std.StdDeserializer',
+  'com.fasterxml.jackson.databind.deser.std.StdScalarDeserializer',
+  'com.fasterxml.jackson.databind.module.SimpleModule',
+  'com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter',
+  'com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider',
+  'com.fasterxml.jackson.databind.ser.std.StdScalarSerializer',
+  'com.fasterxml.jackson.databind.ser.std.StdSerializer',
+  'com.fasterxml.jackson.dataformat.xml.JacksonXmlModule',
+  'com.fasterxml.jackson.dataformat.xml.XmlMapper',
+  'com.fasterxml.jackson.dataformat.xml.util.DefaultXmlPrettyPrinter',
+  'com.lmax.disruptor.BlockingWaitStrategy',
+  'com.lmax.disruptor.BusySpinWaitStrategy',
+  'com.lmax.disruptor.EventFactory',
+  'com.lmax.disruptor.EventTranslator',
+  'com.lmax.disruptor.EventTranslatorTwoArg',
+  'com.lmax.disruptor.EventTranslatorVararg',
+  'com.lmax.disruptor.ExceptionHandler',
+  'com.lmax.disruptor.LifecycleAware',
+  'com.lmax.disruptor.RingBuffer',
+  'com.lmax.disruptor.Sequence',
+  'com.lmax.disruptor.SequenceReportingEventHandler',
+  'com.lmax.disruptor.SleepingWaitStrategy',
+  'com.lmax.disruptor.TimeoutBlockingWaitStrategy',
+  'com.lmax.disruptor.WaitStrategy',
+  'com.lmax.disruptor.YieldingWaitStrategy',
+  'com.lmax.disruptor.dsl.Disruptor',
+  'com.lmax.disruptor.dsl.ProducerType',
+  'javax.jms.Connection',
+  'javax.jms.ConnectionFactory',
+  'javax.jms.Destination',
+  'javax.jms.Message',
+  'javax.jms.MessageConsumer',
+  'javax.jms.MessageListener',
+  'javax.jms.MessageProducer',
+  'javax.jms.ObjectMessage',
+  'javax.jms.Session',
+  'javax.mail.Authenticator',
+  'javax.mail.Message$RecipientType',
+  'javax.mail.PasswordAuthentication',
+  'javax.mail.Session',
+  'javax.mail.Transport',
+  'javax.mail.internet.InternetAddress',
+  'javax.mail.internet.InternetHeaders',
+  'javax.mail.internet.MimeBodyPart',
+  'javax.mail.internet.MimeMessage',
+  'javax.mail.internet.MimeMultipart',
+  'javax.mail.internet.MimeUtility',
+  'javax.mail.util.ByteArrayDataSource',
+  'javax.persistence.AttributeConverter',
+  'javax.persistence.EntityManager',
+  'javax.persistence.EntityManagerFactory',
+  'javax.persistence.EntityTransaction',
+  'javax.persistence.Persistence',
+  'javax.persistence.PersistenceException',
+  'org.apache.commons.compress.compressors.CompressorStreamFactory',
+  'org.apache.commons.compress.utils.IOUtils',
+  'org.apache.commons.csv.CSVFormat',
+  'org.apache.commons.csv.QuoteMode',
+  'org.apache.kafka.clients.producer.KafkaProducer',
+  'org.apache.kafka.clients.producer.Producer',
+  'org.apache.kafka.clients.producer.ProducerRecord',
+  'org.codehaus.stax2.XMLStreamWriter2',
+  'org.osgi.framework.AdaptPermission',
+  'org.osgi.framework.AdminPermission',
+  'org.osgi.framework.Bundle',
+  'org.osgi.framework.BundleActivator',
+  'org.osgi.framework.BundleContext',
+  'org.osgi.framework.BundleEvent',
+  'org.osgi.framework.BundleReference',
+  'org.osgi.framework.FrameworkUtil',
+  'org.osgi.framework.SynchronousBundleListener',
+  'org.osgi.framework.wiring.BundleWire',
+  'org.osgi.framework.wiring.BundleWiring',
+  'org.zeromq.ZMQ$Context',
+  'org.zeromq.ZMQ$Socket',
+  'org.zeromq.ZMQ',
   // from org.locationtech.spatial4j.io.GeoJSONReader (spatial4j)
   'org.noggit.JSONParser',
 ]

Java9Hack.java (deleted)

@@ -1,37 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.log4j;
import org.apache.log4j.helpers.ThreadLocalMap;
/**
* Log4j 1.2 MDC breaks because it parses java.version incorrectly (does not handle new java9 versioning).
*
* This hack fixes up the pkg private members as if it had detected the java version correctly.
*/
public class Java9Hack {
public static void fixLog4j() {
if (MDC.mdc.tlm == null) {
MDC.mdc.java1 = false;
MDC.mdc.tlm = new ThreadLocalMap();
}
}
}

package-info.java (deleted)

@@ -1,23 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Hack to fix Log4j 1.2 in Java 9.
*/
package org.apache.log4j;

ThrowableProxy.java (new file, forked from Log4j)

@@ -0,0 +1,665 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache license, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the license for the specific language governing permissions and
* limitations under the license.
*/
package org.apache.logging.log4j.core.impl;
import java.io.Serializable;
import java.net.URL;
import java.security.CodeSource;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Stack;
import org.apache.logging.log4j.core.util.Loader;
import org.apache.logging.log4j.status.StatusLogger;
import org.apache.logging.log4j.util.ReflectionUtil;
import org.apache.logging.log4j.util.Strings;
/**
* Wraps a Throwable to add packaging information about each stack trace element.
*
* <p>
* A proxy is used to represent a throwable that may not exist in a different class loader or JVM. When an application
* deserializes a ThrowableProxy, the throwable may not be set, but the throwable's information is preserved in other
* fields of the proxy like the message and stack trace.
* </p>
*
* <p>
* TODO: Move this class to org.apache.logging.log4j.core because it is used from LogEvent.
* </p>
* <p>
* TODO: Deserialize: Try to rebuild Throwable if the target exception is in this class loader?
* </p>
*/
public class ThrowableProxy implements Serializable {
private static final String CAUSED_BY_LABEL = "Caused by: ";
private static final String SUPPRESSED_LABEL = "Suppressed: ";
private static final String WRAPPED_BY_LABEL = "Wrapped by: ";
/**
* Cached StackTracePackageElement and ClassLoader.
* <p>
* Consider this class private.
* </p>
*/
static class CacheEntry {
private final ExtendedClassInfo element;
private final ClassLoader loader;
public CacheEntry(final ExtendedClassInfo element, final ClassLoader loader) {
this.element = element;
this.loader = loader;
}
}
private static final ThrowableProxy[] EMPTY_THROWABLE_PROXY_ARRAY = new ThrowableProxy[0];
private static final char EOL = '\n';
private static final long serialVersionUID = -2752771578252251910L;
private final ThrowableProxy causeProxy;
private int commonElementCount;
private final ExtendedStackTraceElement[] extendedStackTrace;
private final String localizedMessage;
private final String message;
private final String name;
private final ThrowableProxy[] suppressedProxies;
private final transient Throwable throwable;
/**
* For JSON and XML IO via Jackson.
*/
@SuppressWarnings("unused")
private ThrowableProxy() {
this.throwable = null;
this.name = null;
this.extendedStackTrace = null;
this.causeProxy = null;
this.message = null;
this.localizedMessage = null;
this.suppressedProxies = EMPTY_THROWABLE_PROXY_ARRAY;
}
/**
* Constructs the wrapper for the Throwable that includes packaging data.
*
* @param throwable
* The Throwable to wrap, must not be null.
*/
public ThrowableProxy(final Throwable throwable) {
this(throwable, null);
}
/**
* Constructs the wrapper for the Throwable that includes packaging data.
*
* @param throwable
* The Throwable to wrap, must not be null.
* @param visited
* The set of visited suppressed exceptions.
*/
private ThrowableProxy(final Throwable throwable, final Set<Throwable> visited) {
this.throwable = throwable;
this.name = throwable.getClass().getName();
this.message = throwable.getMessage();
this.localizedMessage = throwable.getLocalizedMessage();
final Map<String, CacheEntry> map = new HashMap<>();
final Stack<Class<?>> stack = ReflectionUtil.getCurrentStackTrace();
this.extendedStackTrace = this.toExtendedStackTrace(stack, map, null, throwable.getStackTrace());
final Throwable throwableCause = throwable.getCause();
final Set<Throwable> causeVisited = new HashSet<>(1);
this.causeProxy = throwableCause == null ? null : new ThrowableProxy(throwable, stack, map, throwableCause, visited, causeVisited);
this.suppressedProxies = this.toSuppressedProxies(throwable, visited);
}
/**
* Constructs the wrapper for a Throwable that is referenced as the cause by another Throwable.
*
* @param parent
* The Throwable referencing this Throwable.
* @param stack
* The Class stack.
* @param map
* The cache containing the packaging data.
* @param cause
* The Throwable to wrap.
* @param suppressedVisited TODO
* @param causeVisited TODO
*/
private ThrowableProxy(final Throwable parent, final Stack<Class<?>> stack, final Map<String, CacheEntry> map,
final Throwable cause, final Set<Throwable> suppressedVisited, final Set<Throwable> causeVisited) {
causeVisited.add(cause);
this.throwable = cause;
this.name = cause.getClass().getName();
this.message = this.throwable.getMessage();
this.localizedMessage = this.throwable.getLocalizedMessage();
this.extendedStackTrace = this.toExtendedStackTrace(stack, map, parent.getStackTrace(), cause.getStackTrace());
final Throwable causeCause = cause.getCause();
this.causeProxy = causeCause == null || causeVisited.contains(causeCause) ? null : new ThrowableProxy(parent,
stack, map, causeCause, suppressedVisited, causeVisited);
this.suppressedProxies = this.toSuppressedProxies(cause, suppressedVisited);
}
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (this.getClass() != obj.getClass()) {
return false;
}
final ThrowableProxy other = (ThrowableProxy) obj;
if (this.causeProxy == null) {
if (other.causeProxy != null) {
return false;
}
} else if (!this.causeProxy.equals(other.causeProxy)) {
return false;
}
if (this.commonElementCount != other.commonElementCount) {
return false;
}
if (this.name == null) {
if (other.name != null) {
return false;
}
} else if (!this.name.equals(other.name)) {
return false;
}
if (!Arrays.equals(this.extendedStackTrace, other.extendedStackTrace)) {
return false;
}
if (!Arrays.equals(this.suppressedProxies, other.suppressedProxies)) {
return false;
}
return true;
}
private void formatCause(final StringBuilder sb, final String prefix, final ThrowableProxy cause, final List<String> ignorePackages) {
formatThrowableProxy(sb, prefix, CAUSED_BY_LABEL, cause, ignorePackages);
}
private void formatThrowableProxy(final StringBuilder sb, final String prefix, final String causeLabel,
final ThrowableProxy throwableProxy, final List<String> ignorePackages) {
if (throwableProxy == null) {
return;
}
sb.append(prefix).append(causeLabel).append(throwableProxy).append(EOL);
this.formatElements(sb, prefix, throwableProxy.commonElementCount,
throwableProxy.getStackTrace(), throwableProxy.extendedStackTrace, ignorePackages);
this.formatSuppressed(sb, prefix + "\t", throwableProxy.suppressedProxies, ignorePackages);
this.formatCause(sb, prefix, throwableProxy.causeProxy, ignorePackages);
}
private void formatSuppressed(final StringBuilder sb, final String prefix, final ThrowableProxy[] suppressedProxies,
final List<String> ignorePackages) {
if (suppressedProxies == null) {
return;
}
for (final ThrowableProxy suppressedProxy : suppressedProxies) {
final ThrowableProxy cause = suppressedProxy;
formatThrowableProxy(sb, prefix, SUPPRESSED_LABEL, cause, ignorePackages);
}
}
private void formatElements(final StringBuilder sb, final String prefix, final int commonCount,
final StackTraceElement[] causedTrace, final ExtendedStackTraceElement[] extStackTrace,
final List<String> ignorePackages) {
if (ignorePackages == null || ignorePackages.isEmpty()) {
for (final ExtendedStackTraceElement element : extStackTrace) {
this.formatEntry(element, sb, prefix);
}
} else {
int count = 0;
for (int i = 0; i < extStackTrace.length; ++i) {
if (!this.ignoreElement(causedTrace[i], ignorePackages)) {
if (count > 0) {
appendSuppressedCount(sb, prefix, count);
count = 0;
}
this.formatEntry(extStackTrace[i], sb, prefix);
} else {
++count;
}
}
if (count > 0) {
appendSuppressedCount(sb, prefix, count);
}
}
if (commonCount != 0) {
sb.append(prefix).append("\t... ").append(commonCount).append(" more").append(EOL);
}
}
private void appendSuppressedCount(final StringBuilder sb, final String prefix, final int count) {
sb.append(prefix);
if (count == 1) {
sb.append("\t....").append(EOL);
} else {
sb.append("\t... suppressed ").append(count).append(" lines").append(EOL);
}
}
private void formatEntry(final ExtendedStackTraceElement extStackTraceElement, final StringBuilder sb, final String prefix) {
sb.append(prefix);
sb.append("\tat ");
sb.append(extStackTraceElement);
sb.append(EOL);
}
/**
* Formats the specified Throwable.
*
* @param sb
* StringBuilder to contain the formatted Throwable.
* @param cause
* The Throwable to format.
*/
public void formatWrapper(final StringBuilder sb, final ThrowableProxy cause) {
this.formatWrapper(sb, cause, null);
}
/**
* Formats the specified Throwable.
*
* @param sb
* StringBuilder to contain the formatted Throwable.
* @param cause
* The Throwable to format.
* @param packages
* The List of packages to be suppressed from the trace.
*/
@SuppressWarnings("ThrowableResultOfMethodCallIgnored")
public void formatWrapper(final StringBuilder sb, final ThrowableProxy cause, final List<String> packages) {
final Throwable caused = cause.getCauseProxy() != null ? cause.getCauseProxy().getThrowable() : null;
if (caused != null) {
this.formatWrapper(sb, cause.causeProxy);
sb.append(WRAPPED_BY_LABEL);
}
sb.append(cause).append(EOL);
this.formatElements(sb, "", cause.commonElementCount,
cause.getThrowable().getStackTrace(), cause.extendedStackTrace, packages);
}
public ThrowableProxy getCauseProxy() {
return this.causeProxy;
}
/**
* Format the Throwable that is the cause of this Throwable.
*
* @return The formatted Throwable that caused this Throwable.
*/
public String getCauseStackTraceAsString() {
return this.getCauseStackTraceAsString(null);
}
/**
* Format the Throwable that is the cause of this Throwable.
*
* @param packages
* The List of packages to be suppressed from the trace.
* @return The formatted Throwable that caused this Throwable.
*/
public String getCauseStackTraceAsString(final List<String> packages) {
final StringBuilder sb = new StringBuilder();
if (this.causeProxy != null) {
this.formatWrapper(sb, this.causeProxy);
sb.append(WRAPPED_BY_LABEL);
}
sb.append(this.toString());
sb.append(EOL);
this.formatElements(sb, "", 0, this.throwable.getStackTrace(), this.extendedStackTrace, packages);
return sb.toString();
}
/**
* Return the number of elements that are being omitted because they are common with the parent Throwable's stack
* trace.
*
* @return The number of elements omitted from the stack trace.
*/
public int getCommonElementCount() {
return this.commonElementCount;
}
/**
* Gets the stack trace including packaging information.
*
* @return The stack trace including packaging information.
*/
public ExtendedStackTraceElement[] getExtendedStackTrace() {
return this.extendedStackTrace;
}
/**
* Format the stack trace including packaging information.
*
* @return The formatted stack trace including packaging information.
*/
public String getExtendedStackTraceAsString() {
return this.getExtendedStackTraceAsString(null);
}
/**
* Format the stack trace including packaging information.
*
* @param ignorePackages
* List of packages to be ignored in the trace.
* @return The formatted stack trace including packaging information.
*/
public String getExtendedStackTraceAsString(final List<String> ignorePackages) {
final StringBuilder sb = new StringBuilder(this.name);
final String msg = this.message;
if (msg != null) {
sb.append(": ").append(msg);
}
sb.append(EOL);
final StackTraceElement[] causedTrace = this.throwable != null ? this.throwable.getStackTrace() : null;
this.formatElements(sb, "", 0, causedTrace, this.extendedStackTrace, ignorePackages);
this.formatSuppressed(sb, "\t", this.suppressedProxies, ignorePackages);
this.formatCause(sb, "", this.causeProxy, ignorePackages);
return sb.toString();
}
public String getLocalizedMessage() {
return this.localizedMessage;
}
public String getMessage() {
return this.message;
}
/**
* Return the FQCN of the Throwable.
*
* @return The FQCN of the Throwable.
*/
public String getName() {
return this.name;
}
public StackTraceElement[] getStackTrace() {
return this.throwable == null ? null : this.throwable.getStackTrace();
}
/**
* Gets proxies for suppressed exceptions.
*
* @return proxies for suppressed exceptions.
*/
public ThrowableProxy[] getSuppressedProxies() {
return this.suppressedProxies;
}
/**
* Format the suppressed Throwables.
*
* @return The formatted suppressed Throwables.
*/
public String getSuppressedStackTrace() {
final ThrowableProxy[] suppressed = this.getSuppressedProxies();
if (suppressed == null || suppressed.length == 0) {
return Strings.EMPTY;
}
final StringBuilder sb = new StringBuilder("Suppressed Stack Trace Elements:").append(EOL);
for (final ThrowableProxy proxy : suppressed) {
sb.append(proxy.getExtendedStackTraceAsString());
}
return sb.toString();
}
/**
* The throwable or null if this object is deserialized from XML or JSON.
*
* @return The throwable or null if this object is deserialized from XML or JSON.
*/
public Throwable getThrowable() {
return this.throwable;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + (this.causeProxy == null ? 0 : this.causeProxy.hashCode());
result = prime * result + this.commonElementCount;
result = prime * result + (this.extendedStackTrace == null ? 0 : Arrays.hashCode(this.extendedStackTrace));
result = prime * result + (this.suppressedProxies == null ? 0 : Arrays.hashCode(this.suppressedProxies));
result = prime * result + (this.name == null ? 0 : this.name.hashCode());
return result;
}
private boolean ignoreElement(final StackTraceElement element, final List<String> ignorePackages) {
final String className = element.getClassName();
for (final String pkg : ignorePackages) {
if (className.startsWith(pkg)) {
return true;
}
}
return false;
}
/**
* Loads classes not located via Reflection.getCallerClass.
*
* @param lastLoader
* The ClassLoader that loaded the Class that called this Class.
* @param className
* The name of the Class.
* @return The Class object for the Class or null if it could not be located.
*/
private Class<?> loadClass(final ClassLoader lastLoader, final String className) {
// XXX: this is overly complicated
Class<?> clazz;
if (lastLoader != null) {
try {
clazz = Loader.initializeClass(className, lastLoader);
if (clazz != null) {
return clazz;
}
} catch (final Throwable ignore) {
// Ignore exception.
}
}
try {
clazz = Loader.loadClass(className);
} catch (final ClassNotFoundException ignored) {
return initializeClass(className);
} catch (final NoClassDefFoundError ignored) {
return initializeClass(className);
} catch (final SecurityException ignored) {
return initializeClass(className);
}
return clazz;
}
private Class<?> initializeClass(final String className) {
try {
return Loader.initializeClass(className, this.getClass().getClassLoader());
} catch (final ClassNotFoundException ignore) {
return null;
} catch (final NoClassDefFoundError ignore) {
return null;
} catch (final SecurityException ignore) {
return null;
}
}
/**
* Construct the CacheEntry from the Class's information.
*
* @param stackTraceElement
* The stack trace element
* @param callerClass
* The Class.
* @param exact
* True if the class was obtained via Reflection.getCallerClass.
*
* @return The CacheEntry.
*/
private CacheEntry toCacheEntry(final StackTraceElement stackTraceElement, final Class<?> callerClass,
final boolean exact) {
String location = "?";
String version = "?";
ClassLoader lastLoader = null;
if (callerClass != null) {
try {
final CodeSource source = callerClass.getProtectionDomain().getCodeSource();
if (source != null) {
final URL locationURL = source.getLocation();
if (locationURL != null) {
final String str = locationURL.toString().replace('\\', '/');
int index = str.lastIndexOf("/");
if (index >= 0 && index == str.length() - 1) {
index = str.lastIndexOf("/", index - 1);
location = str.substring(index + 1);
} else {
location = str.substring(index + 1);
}
}
}
} catch (final Exception ex) {
// Ignore the exception.
}
final Package pkg = callerClass.getPackage();
if (pkg != null) {
final String ver = pkg.getImplementationVersion();
if (ver != null) {
version = ver;
}
}
lastLoader = callerClass.getClassLoader();
}
return new CacheEntry(new ExtendedClassInfo(exact, location, version), lastLoader);
}
/**
* Resolve all the stack entries in this stack trace that are not common with the parent.
*
* @param stack
* The callers Class stack.
* @param map
* The cache of CacheEntry objects.
* @param rootTrace
* The first stack trace resolve or null.
* @param stackTrace
* The stack trace being resolved.
* @return The StackTracePackageElement array.
*/
ExtendedStackTraceElement[] toExtendedStackTrace(final Stack<Class<?>> stack, final Map<String, CacheEntry> map,
final StackTraceElement[] rootTrace, final StackTraceElement[] stackTrace) {
int stackLength;
if (rootTrace != null) {
int rootIndex = rootTrace.length - 1;
int stackIndex = stackTrace.length - 1;
while (rootIndex >= 0 && stackIndex >= 0 && rootTrace[rootIndex].equals(stackTrace[stackIndex])) {
--rootIndex;
--stackIndex;
}
this.commonElementCount = stackTrace.length - 1 - stackIndex;
stackLength = stackIndex + 1;
} else {
this.commonElementCount = 0;
stackLength = stackTrace.length;
}
final ExtendedStackTraceElement[] extStackTrace = new ExtendedStackTraceElement[stackLength];
Class<?> clazz = stack.isEmpty() ? null : stack.peek();
ClassLoader lastLoader = null;
for (int i = stackLength - 1; i >= 0; --i) {
final StackTraceElement stackTraceElement = stackTrace[i];
final String className = stackTraceElement.getClassName();
// The stack returned from getCurrentStack may be missing entries for java.lang.reflect.Method.invoke()
// and its implementation. The Throwable might also contain stack entries that are no longer
// present as those methods have returned.
ExtendedClassInfo extClassInfo;
if (clazz != null && className.equals(clazz.getName())) {
final CacheEntry entry = this.toCacheEntry(stackTraceElement, clazz, true);
extClassInfo = entry.element;
lastLoader = entry.loader;
stack.pop();
clazz = stack.isEmpty() ? null : stack.peek();
} else {
final CacheEntry cacheEntry = map.get(className);
if (cacheEntry != null) {
final CacheEntry entry = cacheEntry;
extClassInfo = entry.element;
if (entry.loader != null) {
lastLoader = entry.loader;
}
} else {
final CacheEntry entry = this.toCacheEntry(stackTraceElement,
this.loadClass(lastLoader, className), false);
extClassInfo = entry.element;
map.put(stackTraceElement.toString(), entry);
if (entry.loader != null) {
lastLoader = entry.loader;
}
}
}
extStackTrace[i] = new ExtendedStackTraceElement(stackTraceElement, extClassInfo);
}
return extStackTrace;
}
@Override
public String toString() {
final String msg = this.message;
return msg != null ? this.name + ": " + msg : this.name;
}
private ThrowableProxy[] toSuppressedProxies(final Throwable thrown, Set<Throwable> suppressedVisited) {
try {
final Throwable[] suppressed = thrown.getSuppressed();
if (suppressed == null) {
return EMPTY_THROWABLE_PROXY_ARRAY;
}
final List<ThrowableProxy> proxies = new ArrayList<>(suppressed.length);
if (suppressedVisited == null) {
suppressedVisited = new HashSet<>(proxies.size());
}
for (int i = 0; i < suppressed.length; i++) {
final Throwable candidate = suppressed[i];
if (!suppressedVisited.contains(candidate)) {
suppressedVisited.add(candidate);
proxies.add(new ThrowableProxy(candidate, suppressedVisited));
}
}
return proxies.toArray(new ThrowableProxy[proxies.size()]);
} catch (final Exception e) {
StatusLogger.getLogger().error(e);
}
return null;
}
}
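Reviewer note: a short usage sketch of the forked class above, based only on the public API it exposes — each frame of the printed trace carries the ExtendedClassInfo (code-source location and implementation version) resolved in toCacheEntry:

import org.apache.logging.log4j.core.impl.ThrowableProxy;

public class ThrowableProxyDemo {
    public static void main(String[] args) {
        Throwable boom = new RuntimeException("boom", new IllegalStateException("root cause"));
        ThrowableProxy proxy = new ThrowableProxy(boom);
        // Like Throwable.printStackTrace(), but each frame is annotated
        // with the jar and package version that supplied the class.
        System.out.println(proxy.getExtendedStackTraceAsString());
        // The cause chain, formatted with "Wrapped by:" labels.
        System.out.println(proxy.getCauseStackTraceAsString());
    }
}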

MapperQueryParser.java

@@ -102,7 +102,6 @@ public class MapperQueryParser extends QueryParser {
         setLowercaseExpandedTerms(settings.lowercaseExpandedTerms());
         setPhraseSlop(settings.phraseSlop());
         setDefaultOperator(settings.defaultOperator());
-        setFuzzyMinSim(settings.fuzziness().asFloat());
         setFuzzyPrefixLength(settings.fuzzyPrefixLength());
         setLocale(settings.locale());
     }
@@ -114,7 +113,7 @@ public class MapperQueryParser extends QueryParser {
     @Override
     Query handleBareFuzzy(String qfield, Token fuzzySlop, String termImage) throws ParseException {
         if (fuzzySlop.image.length() == 1) {
-            return getFuzzyQuery(qfield, termImage, Float.toString(fuzzyMinSim));
+            return getFuzzyQuery(qfield, termImage, Float.toString(settings.fuzziness().asDistance(termImage)));
        }
         return getFuzzyQuery(qfield, termImage, fuzzySlop.image.substring(1));
     }
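
The switch from asFloat() to asDistance(termImage) means a bare trailing ~ now resolves fuzziness per term, as a Levenshtein edit distance, rather than as one fixed similarity for every term. A hedged sketch of what AUTO-style resolution looks like (hypothetical helper, not the actual Fuzziness implementation):

    // Illustration only: map a term to an allowed edit distance by length,
    // instead of applying one fixed float similarity to all terms.
    static int autoEditDistance(String term) {
        if (term.length() <= 2) {
            return 0; // very short terms must match exactly
        } else if (term.length() <= 5) {
            return 1; // short terms allow one edit
        }
        return 2;     // longer terms allow two edits
    }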

View File

@@ -19,6 +19,7 @@
 package org.elasticsearch;

+import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.elasticsearch.action.support.replication.ReplicationOperation;
 import org.elasticsearch.cluster.action.shard.ShardStateAction;
 import org.elasticsearch.common.io.stream.StreamInput;

View File

@@ -19,12 +19,12 @@
 package org.elasticsearch;

+import org.apache.logging.log4j.Logger;
 import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.IndexFormatTooNewException;
 import org.apache.lucene.index.IndexFormatTooOldException;
 import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.rest.RestStatus;
@@ -39,7 +39,7 @@ import java.util.Set;
 public final class ExceptionsHelper {

-    private static final ESLogger logger = Loggers.getLogger(ExceptionsHelper.class);
+    private static final Logger logger = Loggers.getLogger(ExceptionsHelper.class);

     public static RuntimeException convertToRuntime(Exception e) {
         if (e instanceof RuntimeException) {

View File

@@ -73,6 +73,8 @@ public class Version {
     public static final Version V_2_3_4 = new Version(V_2_3_4_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
     public static final int V_2_3_5_ID = 2030599;
     public static final Version V_2_3_5 = new Version(V_2_3_5_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
+    public static final int V_2_4_0_ID = 2040099;
+    public static final Version V_2_4_0 = new Version(V_2_4_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_2);
     public static final int V_5_0_0_alpha1_ID = 5000001;
     public static final Version V_5_0_0_alpha1 = new Version(V_5_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
     public static final int V_5_0_0_alpha2_ID = 5000002;
@@ -110,6 +112,8 @@ public class Version {
                 return V_5_0_0_alpha2;
             case V_5_0_0_alpha1_ID:
                 return V_5_0_0_alpha1;
+            case V_2_4_0_ID:
+                return V_2_4_0;
             case V_2_3_5_ID:
                 return V_2_3_5;
             case V_2_3_4_ID:

View File

@@ -20,7 +20,6 @@
 package org.elasticsearch.action;

 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
@@ -335,7 +334,7 @@ public class ActionModule extends AbstractModule {
         this.actionPlugins = actionPlugins;
         actions = setupActions(actionPlugins);
         actionFilters = setupActionFilters(actionPlugins, ingestEnabled);
-        autoCreateIndex = transportClient ? null : new AutoCreateIndex(settings, resolver);
+        autoCreateIndex = transportClient ? null : new AutoCreateIndex(settings, clusterSettings, resolver);
         destructiveOperations = new DestructiveOperations(settings, clusterSettings);
         Set<String> headers = actionPlugins.stream().flatMap(p -> p.getRestHeaders().stream()).collect(Collectors.toSet());
         restController = new RestController(settings, headers);

View File

@@ -19,6 +19,8 @@
 package org.elasticsearch.action.admin.cluster.health;

+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.ActiveShardCount;
@@ -106,7 +108,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
             @Override
             public void onFailure(String source, Exception e) {
-                logger.error("unexpected failure during [{}]", e, source);
+                logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
                 listener.onFailure(e);
             }
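
This logging rewrite recurs throughout the rest of the commit: the old ESLogger overloads took the Throwable before the format arguments, while the Log4j 2 API takes a lazily evaluated message Supplier plus the Throwable as the last parameter, so the ParameterizedMessage is only built when the level is actually enabled. A minimal self-contained sketch of the new pattern (hypothetical class name, standard log4j-api calls):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.message.ParameterizedMessage;
    import org.apache.logging.log4j.util.Supplier;

    class LoggingSketch {
        private static final Logger logger = LogManager.getLogger(LoggingSketch.class);

        static void report(String source, Exception e) {
            // message construction is deferred until ERROR is known to be enabled
            logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
        }
    }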

View File

@@ -37,10 +37,6 @@ import org.elasticsearch.threadpool.ThreadPoolInfo;
 import org.elasticsearch.transport.TransportInfo;

 import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-
-import static java.util.Collections.unmodifiableMap;

 /**
  * Node information (static, does not change over time).
@@ -85,8 +81,8 @@ public class NodeInfo extends BaseNodeResponse {
     public NodeInfo(Version version, Build build, DiscoveryNode node, @Nullable Settings settings,
                     @Nullable OsInfo os, @Nullable ProcessInfo process, @Nullable JvmInfo jvm, @Nullable ThreadPoolInfo threadPool,
-                    @Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsAndModules plugins, @Nullable IngestInfo ingest,
-                    @Nullable ByteSizeValue totalIndexingBuffer) {
+                    @Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsAndModules plugins,
+                    @Nullable IngestInfo ingest, @Nullable ByteSizeValue totalIndexingBuffer) {
         super(node);
         this.version = version;
         this.build = build;
@@ -205,31 +201,14 @@ public class NodeInfo extends BaseNodeResponse {
         if (in.readBoolean()) {
             settings = Settings.readSettingsFromStream(in);
         }
-        if (in.readBoolean()) {
-            os = OsInfo.readOsInfo(in);
-        }
-        if (in.readBoolean()) {
-            process = ProcessInfo.readProcessInfo(in);
-        }
-        if (in.readBoolean()) {
-            jvm = JvmInfo.readJvmInfo(in);
-        }
-        if (in.readBoolean()) {
-            threadPool = ThreadPoolInfo.readThreadPoolInfo(in);
-        }
-        if (in.readBoolean()) {
-            transport = TransportInfo.readTransportInfo(in);
-        }
-        if (in.readBoolean()) {
-            http = HttpInfo.readHttpInfo(in);
-        }
-        if (in.readBoolean()) {
-            plugins = new PluginsAndModules();
-            plugins.readFrom(in);
-        }
-        if (in.readBoolean()) {
-            ingest = new IngestInfo(in);
-        }
+        os = in.readOptionalWriteable(OsInfo::new);
+        process = in.readOptionalWriteable(ProcessInfo::new);
+        jvm = in.readOptionalWriteable(JvmInfo::new);
+        threadPool = in.readOptionalWriteable(ThreadPoolInfo::new);
+        transport = in.readOptionalWriteable(TransportInfo::new);
+        http = in.readOptionalWriteable(HttpInfo::new);
+        plugins = in.readOptionalWriteable(PluginsAndModules::new);
+        ingest = in.readOptionalWriteable(IngestInfo::new);
     }

     @Override
@@ -249,53 +228,13 @@ public class NodeInfo extends BaseNodeResponse {
             out.writeBoolean(true);
             Settings.writeSettingsToStream(settings, out);
         }
-        if (os == null) {
-            out.writeBoolean(false);
-        } else {
-            out.writeBoolean(true);
-            os.writeTo(out);
-        }
-        if (process == null) {
-            out.writeBoolean(false);
-        } else {
-            out.writeBoolean(true);
-            process.writeTo(out);
-        }
-        if (jvm == null) {
-            out.writeBoolean(false);
-        } else {
-            out.writeBoolean(true);
-            jvm.writeTo(out);
-        }
-        if (threadPool == null) {
-            out.writeBoolean(false);
-        } else {
-            out.writeBoolean(true);
-            threadPool.writeTo(out);
-        }
-        if (transport == null) {
-            out.writeBoolean(false);
-        } else {
-            out.writeBoolean(true);
-            transport.writeTo(out);
-        }
-        if (http == null) {
-            out.writeBoolean(false);
-        } else {
-            out.writeBoolean(true);
-            http.writeTo(out);
-        }
-        if (plugins == null) {
-            out.writeBoolean(false);
-        } else {
-            out.writeBoolean(true);
-            plugins.writeTo(out);
-        }
-        if (ingest == null) {
-            out.writeBoolean(false);
-        } else {
-            out.writeBoolean(true);
-            ingest.writeTo(out);
-        }
+        out.writeOptionalWriteable(os);
+        out.writeOptionalWriteable(process);
+        out.writeOptionalWriteable(jvm);
+        out.writeOptionalWriteable(threadPool);
+        out.writeOptionalWriteable(transport);
+        out.writeOptionalWriteable(http);
+        out.writeOptionalWriteable(plugins);
+        out.writeOptionalWriteable(ingest);
     }
 }
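
readOptionalWriteable and writeOptionalWriteable encapsulate the boolean-presence protocol that the deleted blocks spelled out by hand: a flag byte, then the value only when present. A simplified sketch of that wire format using plain java.io streams (illustrative only; the real StreamInput/StreamOutput carry more state):

    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    final class OptionalWire {
        // write a presence flag, then the value itself only when non-null
        static void writeOptional(DataOutputStream out, String value) throws IOException {
            out.writeBoolean(value != null);
            if (value != null) {
                out.writeUTF(value);
            }
        }

        // read the flag back and only decode a value when one was written
        static String readOptional(DataInputStream in) throws IOException {
            return in.readBoolean() ? in.readUTF() : null;
        }
    }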

View File

@@ -21,7 +21,7 @@ package org.elasticsearch.action.admin.cluster.node.info;

 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.plugins.PluginInfo;
@@ -34,13 +34,24 @@ import java.util.List;
 /**
  * Information about plugins and modules
  */
-public class PluginsAndModules implements Streamable, ToXContent {
-    private List<PluginInfo> plugins;
-    private List<PluginInfo> modules;
+public class PluginsAndModules implements Writeable, ToXContent {
+    private final List<PluginInfo> plugins;
+    private final List<PluginInfo> modules;

-    public PluginsAndModules() {
-        plugins = new ArrayList<>();
-        modules = new ArrayList<>();
+    public PluginsAndModules(List<PluginInfo> plugins, List<PluginInfo> modules) {
+        this.plugins = Collections.unmodifiableList(plugins);
+        this.modules = Collections.unmodifiableList(modules);
+    }
+
+    public PluginsAndModules(StreamInput in) throws IOException {
+        this.plugins = Collections.unmodifiableList(in.readList(PluginInfo::new));
+        this.modules = Collections.unmodifiableList(in.readList(PluginInfo::new));
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeList(plugins);
+        out.writeList(modules);
     }

     /**
@@ -69,33 +80,6 @@ public class PluginsAndModules implements Streamable, ToXContent {
         modules.add(info);
     }

-    @Override
-    public void readFrom(StreamInput in) throws IOException {
-        if (plugins.isEmpty() == false || modules.isEmpty() == false) {
-            throw new IllegalStateException("instance is already populated");
-        }
-        int plugins_size = in.readInt();
-        for (int i = 0; i < plugins_size; i++) {
-            plugins.add(PluginInfo.readFromStream(in));
-        }
-        int modules_size = in.readInt();
-        for (int i = 0; i < modules_size; i++) {
-            modules.add(PluginInfo.readFromStream(in));
-        }
-    }
-
-    @Override
-    public void writeTo(StreamOutput out) throws IOException {
-        out.writeInt(plugins.size());
-        for (PluginInfo plugin : getPluginInfos()) {
-            plugin.writeTo(out);
-        }
-        out.writeInt(modules.size());
-        for (PluginInfo module : getModuleInfos()) {
-            module.writeTo(out);
-        }
-    }
-
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startArray("plugins");

View File

@@ -212,7 +212,7 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
             indices = NodeIndicesStats.readIndicesStats(in);
         }
         if (in.readBoolean()) {
-            os = OsStats.readOsStats(in);
+            os = new OsStats(in);
         }
         if (in.readBoolean()) {
             process = ProcessStats.readProcessStats(in);

View File

@@ -19,6 +19,9 @@
 package org.elasticsearch.action.admin.cluster.reroute;

+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@@ -33,7 +36,6 @@ import org.elasticsearch.cluster.routing.allocation.RoutingExplanations;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
@@ -77,13 +79,13 @@ public class TransportClusterRerouteAction extends TransportMasterNodeAction<Clu
         private final ClusterRerouteRequest request;
         private final ActionListener<ClusterRerouteResponse> listener;
-        private final ESLogger logger;
+        private final Logger logger;
         private final AllocationService allocationService;
         private volatile ClusterState clusterStateToSend;
         private volatile RoutingExplanations explanations;

-        ClusterRerouteResponseAckedClusterStateUpdateTask(ESLogger logger, AllocationService allocationService, ClusterRerouteRequest request,
+        ClusterRerouteResponseAckedClusterStateUpdateTask(Logger logger, AllocationService allocationService, ClusterRerouteRequest request,
                                                           ActionListener<ClusterRerouteResponse> listener) {
             super(Priority.IMMEDIATE, request, listener);
             this.request = request;
             this.listener = listener;
@@ -103,7 +105,7 @@ public class TransportClusterRerouteAction extends TransportMasterNodeAction<Clu
         @Override
         public void onFailure(String source, Exception e) {
-            logger.debug("failed to perform [{}]", e, source);
+            logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to perform [{}]", source), e);
             super.onFailure(source, e);
         }

View File

@@ -19,6 +19,8 @@
 package org.elasticsearch.action.admin.cluster.settings;

+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
@@ -148,7 +150,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
             @Override
             public void onFailure(String source, Exception e) {
                 //if the reroute fails we only log
-                logger.debug("failed to perform [{}]", e, source);
+                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to perform [{}]", source), e);
                 listener.onFailure(new ElasticsearchException("reroute after update settings failed", e));
             }
@@ -166,7 +168,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
             @Override
             public void onFailure(String source, Exception e) {
-                logger.debug("failed to perform [{}]", e, source);
+                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to perform [{}]", source), e);
                 super.onFailure(source, e);
             }

View File

@@ -87,7 +87,7 @@ public class ClusterStatsNodes implements ToXContent {
             }
         }
         this.counts = new Counts(nodeInfos);
-        this.os = new OsStats(nodeInfos);
+        this.os = new OsStats(nodeInfos, nodeStats);
         this.process = new ProcessStats(nodeStats);
         this.jvm = new JvmStats(nodeInfos, nodeStats);
         this.networkTypes = new NetworkTypes(nodeInfos);
@@ -226,11 +226,12 @@ public class ClusterStatsNodes implements ToXContent {
         final int availableProcessors;
         final int allocatedProcessors;
         final ObjectIntHashMap<String> names;
+        final org.elasticsearch.monitor.os.OsStats.Mem mem;

         /**
          * Build the stats from information about each node.
          */
-        private OsStats(List<NodeInfo> nodeInfos) {
+        private OsStats(List<NodeInfo> nodeInfos, List<NodeStats> nodeStatsList) {
             this.names = new ObjectIntHashMap<>();
             int availableProcessors = 0;
             int allocatedProcessors = 0;
@@ -244,6 +245,22 @@ public class ClusterStatsNodes implements ToXContent {
             }
             this.availableProcessors = availableProcessors;
             this.allocatedProcessors = allocatedProcessors;
+
+            long totalMemory = 0;
+            long freeMemory = 0;
+            for (NodeStats nodeStats : nodeStatsList) {
+                if (nodeStats.getOs() != null) {
+                    long total = nodeStats.getOs().getMem().getTotal().bytes();
+                    if (total > 0) {
+                        totalMemory += total;
+                    }
+                    long free = nodeStats.getOs().getMem().getFree().bytes();
+                    if (free > 0) {
+                        freeMemory += free;
+                    }
+                }
+            }
+            this.mem = new org.elasticsearch.monitor.os.OsStats.Mem(totalMemory, freeMemory);
         }

         public int getAvailableProcessors() {
@@ -254,6 +271,10 @@ public class ClusterStatsNodes implements ToXContent {
             return allocatedProcessors;
         }

+        public org.elasticsearch.monitor.os.OsStats.Mem getMem() {
+            return mem;
+        }
+
         static final class Fields {
             static final String AVAILABLE_PROCESSORS = "available_processors";
             static final String ALLOCATED_PROCESSORS = "allocated_processors";
@@ -274,6 +295,7 @@ public class ClusterStatsNodes implements ToXContent {
                 builder.endObject();
             }
             builder.endArray();
+            mem.toXContent(builder, params);
             return builder;
         }
     }
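
Note the > 0 guards in the summation above: a node whose OS probe cannot read memory reports a non-positive sentinel, and adding those raw values would corrupt the cluster-wide totals. A minimal sketch of the same defensive summation (hypothetical input, not the actual NodeStats types):

    // Sum per-node byte counts, skipping sentinel values from nodes
    // that could not report a real measurement.
    static long sumReported(long[] perNodeBytes) {
        long sum = 0;
        for (long bytes : perNodeBytes) {
            if (bytes > 0) {
                sum += bytes;
            }
        }
        return sum;
    }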

View File

@@ -92,7 +92,7 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta
     @Override
     protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeRequest) {
         NodeInfo nodeInfo = nodeService.info(true, true, false, true, false, true, false, true, false, false);
-        NodeStats nodeStats = nodeService.stats(CommonStatsFlags.NONE, false, true, true, false, true, false, false, false, false, false, false);
+        NodeStats nodeStats = nodeService.stats(CommonStatsFlags.NONE, true, true, true, false, true, false, false, false, false, false, false);
         List<ShardStats> shardsStats = new ArrayList<>();
         for (IndexService indexService : indicesService) {
             for (IndexShard indexShard : indexService) {

View File

@@ -19,6 +19,8 @@
 package org.elasticsearch.action.admin.indices.close;

+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.DestructiveOperations;
@@ -108,7 +110,7 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIn
             @Override
             public void onFailure(Exception t) {
-                logger.debug("failed to close indices [{}]", t, (Object)concreteIndices);
+                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to close indices [{}]", (Object) concreteIndices), t);
                 listener.onFailure(t);
             }
         });

View File

@@ -19,6 +19,8 @@
 package org.elasticsearch.action.admin.indices.delete;

+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.DestructiveOperations;
@@ -100,7 +102,7 @@ public class TransportDeleteIndexAction extends TransportMasterNodeAction<Delete
             @Override
             public void onFailure(Exception t) {
-                logger.debug("failed to delete indices [{}]", t, concreteIndices);
+                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to delete indices [{}]", concreteIndices), t);
                 listener.onFailure(t);
             }
         });

View File

@@ -19,6 +19,8 @@
 package org.elasticsearch.action.admin.indices.mapping.put;

+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@@ -92,12 +94,12 @@ public class TransportPutMappingAction extends TransportMasterNodeAction<PutMapp
                 @Override
                 public void onFailure(Exception t) {
-                    logger.debug("failed to put mappings on indices [{}], type [{}]", t, concreteIndices, request.type());
+                    logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", concreteIndices, request.type()), t);
                     listener.onFailure(t);
                 }
             });
         } catch (IndexNotFoundException ex) {
-            logger.debug("failed to put mappings on indices [{}], type [{}]", ex, request.indices(), request.type());
+            logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", request.indices(), request.type()), ex);
             throw ex;
         }
     }

View File

@@ -19,6 +19,8 @@
 package org.elasticsearch.action.admin.indices.open;

+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.DestructiveOperations;
@@ -93,7 +95,7 @@ public class TransportOpenIndexAction extends TransportMasterNodeAction<OpenInde
             @Override
             public void onFailure(Exception t) {
-                logger.debug("failed to open indices [{}]", t, (Object)concreteIndices);
+                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to open indices [{}]", (Object) concreteIndices), t);
                 listener.onFailure(t);
             }
         });

View File

@@ -19,6 +19,8 @@
 package org.elasticsearch.action.admin.indices.settings.put;

+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@@ -92,7 +94,7 @@ public class TransportUpdateSettingsAction extends TransportMasterNodeAction<Upd
             @Override
             public void onFailure(Exception t) {
-                logger.debug("failed to update settings on indices [{}]", t, (Object)concreteIndices);
+                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to update settings on indices [{}]", (Object) concreteIndices), t);
                 listener.onFailure(t);
             }
         });

View File

@@ -18,6 +18,7 @@
  */
 package org.elasticsearch.action.admin.indices.shards;

+import org.apache.logging.log4j.Logger;
 import org.apache.lucene.util.CollectionUtil;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.FailedNodeException;
@@ -41,7 +42,6 @@ import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.collect.ImmutableOpenIntMap;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.CountDown;
 import org.elasticsearch.gateway.AsyncShardFetch;
@@ -150,7 +150,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
     private class InternalAsyncFetch extends AsyncShardFetch<NodeGatewayStartedShards> {

-        InternalAsyncFetch(ESLogger logger, String type, ShardId shardId, TransportNodesListGatewayStartedShards action) {
+        InternalAsyncFetch(Logger logger, String type, ShardId shardId, TransportNodesListGatewayStartedShards action) {
             super(logger, type, shardId, action);
         }

View File

@@ -18,6 +18,8 @@
  */
 package org.elasticsearch.action.admin.indices.template.delete;

+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@@ -73,7 +75,7 @@ public class TransportDeleteIndexTemplateAction extends TransportMasterNodeActio
             @Override
             public void onFailure(Exception e) {
-                logger.debug("failed to delete templates [{}]", e, request.name());
+                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to delete templates [{}]", request.name()), e);
                 listener.onFailure(e);
             }
         });

View File

@@ -18,6 +18,8 @@
  */
 package org.elasticsearch.action.admin.indices.template.put;

+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@@ -94,7 +96,7 @@ public class TransportPutIndexTemplateAction extends TransportMasterNodeAction<P
             @Override
             public void onFailure(Exception e) {
-                logger.debug("failed to put template [{}]", e, request.name());
+                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to put template [{}]", request.name()), e);
                 listener.onFailure(e);
             }
         });

View File

@@ -19,6 +19,8 @@
 package org.elasticsearch.action.admin.indices.upgrade.post;

+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@@ -79,7 +81,7 @@ public class TransportUpgradeSettingsAction extends TransportMasterNodeAction<Up
             @Override
             public void onFailure(Exception t) {
-                logger.debug("failed to upgrade minimum compatibility version settings on indices [{}]", t, request.versions().keySet());
+                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to upgrade minimum compatibility version settings on indices [{}]", request.versions().keySet()), t);
                 listener.onFailure(t);
             }
         });

View File

@@ -18,9 +18,11 @@
  */
 package org.elasticsearch.action.bulk;

+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.client.Client;
-import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
@@ -31,7 +33,7 @@ import java.util.concurrent.TimeUnit;
  * Abstracts the low-level details of bulk request handling
  */
 abstract class BulkRequestHandler {
-    protected final ESLogger logger;
+    protected final Logger logger;
     protected final Client client;

     protected BulkRequestHandler(Client client) {
@@ -76,12 +78,12 @@ abstract class BulkRequestHandler {
             listener.afterBulk(executionId, bulkRequest, bulkResponse);
         } catch (InterruptedException e) {
             Thread.currentThread().interrupt();
-            logger.info("Bulk request {} has been cancelled.", e, executionId);
+            logger.info((Supplier<?>) () -> new ParameterizedMessage("Bulk request {} has been cancelled.", executionId), e);
             if (!afterCalled) {
                 listener.afterBulk(executionId, bulkRequest, e);
             }
         } catch (Exception e) {
-            logger.warn("Failed to execute bulk request {}.", e, executionId);
+            logger.warn((Supplier<?>) () -> new ParameterizedMessage("Failed to execute bulk request {}.", executionId), e);
             if (!afterCalled) {
                 listener.afterBulk(executionId, bulkRequest, e);
             }
@@ -142,10 +144,10 @@ abstract class BulkRequestHandler {
             bulkRequestSetupSuccessful = true;
         } catch (InterruptedException e) {
             Thread.currentThread().interrupt();
-            logger.info("Bulk request {} has been cancelled.", e, executionId);
+            logger.info((Supplier<?>) () -> new ParameterizedMessage("Bulk request {} has been cancelled.", executionId), e);
             listener.afterBulk(executionId, bulkRequest, e);
         } catch (Exception e) {
-            logger.warn("Failed to execute bulk request {}.", e, executionId);
+            logger.warn((Supplier<?>) () -> new ParameterizedMessage("Failed to execute bulk request {}.", executionId), e);
             listener.afterBulk(executionId, bulkRequest, e);
         } finally {
             if (!bulkRequestSetupSuccessful && acquired) { // if we fail on client.bulk() release the semaphore

View File

@@ -20,6 +20,7 @@
 package org.elasticsearch.action.bulk;

 import org.elasticsearch.action.support.replication.ReplicatedWriteRequest;
+import org.elasticsearch.action.support.replication.ReplicationRequest;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.index.shard.ShardId;
@@ -101,4 +102,15 @@ public class BulkShardRequest extends ReplicatedWriteRequest<BulkShardRequest> {
         }
         return b.toString();
     }
+
+    @Override
+    public void onRetry() {
+        for (BulkItemRequest item : items) {
+            if (item.request() instanceof ReplicationRequest) {
+                // all replication requests need to be notified here as well, to make sure that
+                // internal optimizations are disabled; see IndexRequest#canHaveDuplicates()
+                ((ReplicationRequest) item.request()).onRetry();
+            }
+        }
+    }
 }
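
The bulk request fans its retry notification out to every item so each sub-request can flip its own retry flag (see the IndexRequest changes below). A hedged sketch of that fan-out with hypothetical types, not the actual Elasticsearch classes:

    import java.util.List;

    // Composite/leaf pair illustrating the retry fan-out: the container
    // forwards onRetry() to every child so per-item optimizations can be disabled.
    interface Retryable {
        void onRetry();
    }

    final class CompositeRequest implements Retryable {
        private final List<Retryable> items;

        CompositeRequest(List<Retryable> items) {
            this.items = items;
        }

        @Override
        public void onRetry() {
            for (Retryable item : items) {
                item.onRetry();
            }
        }
    }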

View File

@@ -18,12 +18,12 @@
  */
 package org.elasticsearch.action.bulk;

+import org.apache.logging.log4j.Logger;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.ActionFuture;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.PlainActionFuture;
 import org.elasticsearch.client.Client;
-import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.concurrent.FutureUtils;
@@ -89,7 +89,7 @@ public class Retry {
     }

     static class AbstractRetryHandler implements ActionListener<BulkResponse> {
-        private final ESLogger logger;
+        private final Logger logger;
         private final Client client;
         private final ActionListener<BulkResponse> listener;
         private final Iterator<TimeValue> backoff;

View File

@@ -19,6 +19,8 @@
 package org.elasticsearch.action.bulk;

+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.ActionRequest;
@@ -30,8 +32,8 @@ import org.elasticsearch.action.index.IndexResponse;
 import org.elasticsearch.action.index.TransportIndexAction;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.replication.ReplicationRequest;
-import org.elasticsearch.action.support.replication.TransportWriteAction;
 import org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo;
+import org.elasticsearch.action.support.replication.TransportWriteAction;
 import org.elasticsearch.action.update.UpdateHelper;
 import org.elasticsearch.action.update.UpdateRequest;
 import org.elasticsearch.action.update.UpdateResponse;
@@ -183,9 +185,9 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
     private <ReplicationRequestT extends ReplicationRequest<ReplicationRequestT>> void logFailure(Throwable t, String operation, ShardId shardId, ReplicationRequest<ReplicationRequestT> request) {
         if (ExceptionsHelper.status(t) == RestStatus.CONFLICT) {
-            logger.trace("{} failed to execute bulk item ({}) {}", t, shardId, operation, request);
+            logger.trace((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", shardId, operation, request), t);
         } else {
-            logger.debug("{} failed to execute bulk item ({}) {}", t, shardId, operation, request);
+            logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", shardId, operation, request), t);
         }
     }

View File

@@ -19,6 +19,8 @@
 package org.elasticsearch.action.get;

+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.TransportActions;
@@ -92,7 +94,7 @@ public class TransportShardMultiGetAction extends TransportSingleShardAction<Mul
                 if (TransportActions.isShardNotAvailableException(e)) {
                     throw (ElasticsearchException) e;
                 } else {
-                    logger.debug("{} failed to execute multi_get for [{}]/[{}]", e, shardId, item.type(), item.id());
+                    logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute multi_get for [{}]/[{}]", shardId, item.type(), item.id()), e);
                     response.add(request.locations.get(i), new MultiGetResponse.Failure(request.index(), item.type(), item.id(), e));
                 }
             }

View File

@@ -72,7 +72,7 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
     /**
      * Operation type controls if the type of the index operation.
      */
-    public static enum OpType {
+    public enum OpType {
         /**
          * Index the source. If there an existing document with the id, it will
          * be replaced.
@@ -152,6 +152,17 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement

     private String pipeline;

+    /**
+     * Value for {@link #getAutoGeneratedTimestamp()} if the document has an external
+     * provided ID.
+     */
+    public static final int UNSET_AUTO_GENERATED_TIMESTAMP = -1;
+
+    private long autoGeneratedTimestamp = UNSET_AUTO_GENERATED_TIMESTAMP;
+
+    private boolean isRetry = false;
+
     public IndexRequest() {
     }

@@ -202,6 +213,10 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
             }
         }

+        if (opType() != OpType.INDEX && id == null) {
+            addValidationError("an id is required for a " + opType() + " operation", validationException);
+        }
+
         if (!versionType.validateVersionForWrites(version)) {
             validationException = addValidationError("illegal version value [" + version + "] for version type [" + versionType.name() + "]", validationException);
         }
@@ -216,6 +231,11 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
             validationException = addValidationError("id is too long, must be no longer than 512 bytes but was: " +
                 id.getBytes(StandardCharsets.UTF_8).length, validationException);
         }
+
+        if (id == null && (versionType == VersionType.INTERNAL && version == Versions.MATCH_ANY) == false) {
+            validationException = addValidationError("an id must be provided if version type or value are set", validationException);
+        }
+
         return validationException;
     }

@@ -589,10 +609,10 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
         }

         // generate id if not already provided and id generation is allowed
-        if (allowIdGeneration) {
-            if (id == null) {
-                id(UUIDs.base64UUID());
-            }
+        if (allowIdGeneration && id == null) {
+            assert autoGeneratedTimestamp == -1;
+            autoGeneratedTimestamp = Math.max(0, System.currentTimeMillis()); // extra paranoia
+            id(UUIDs.base64UUID());
         }

         // generate timestamp if not provided, we always have one post this stage...
@@ -639,6 +659,8 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
         version = in.readLong();
         versionType = VersionType.fromValue(in.readByte());
         pipeline = in.readOptionalString();
+        isRetry = in.readBoolean();
+        autoGeneratedTimestamp = in.readLong();
     }

     @Override
@@ -655,6 +677,8 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
         out.writeLong(version);
         out.writeByte(versionType.getValue());
         out.writeOptionalString(pipeline);
+        out.writeBoolean(isRetry);
+        out.writeLong(autoGeneratedTimestamp);
     }

     @Override
@@ -667,4 +691,25 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
         }
         return "index {[" + index + "][" + type + "][" + id + "], source[" + sSource + "]}";
     }
+
+    /**
+     * Returns <code>true</code> if this request has been sent to a shard copy more than once.
+     */
+    public boolean isRetry() {
+        return isRetry;
+    }
+
+    @Override
+    public void onRetry() {
+        isRetry = true;
+    }
+
+    /**
+     * Returns the timestamp the auto generated ID was created or {@value #UNSET_AUTO_GENERATED_TIMESTAMP} if the
+     * document has no auto generated timestamp. This method will return a positive value iff the id was auto generated.
+     */
+    public long getAutoGeneratedTimestamp() {
+        return autoGeneratedTimestamp;
+    }
 }
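
Together, autoGeneratedTimestamp and isRetry keep the engine's append-only optimization for auto-ID documents safe: a document whose ID was freshly generated can skip the version lookup, unless the request may have been delivered to a shard copy more than once. A hedged sketch of that gating decision (hypothetical helper; the actual check lives in the engine, not here):

    // An auto-ID document may use the fast append-only path only when it is
    // not a retry; a retried request may already exist in the index.
    static boolean canUseAppendOnlyPath(long autoGeneratedTimestamp, boolean isRetry) {
        boolean hasAutoGeneratedId = autoGeneratedTimestamp != -1; // UNSET_AUTO_GENERATED_TIMESTAMP
        return hasAutoGeneratedId && isRetry == false;
    }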

View File

@@ -205,15 +205,6 @@ public class IndexRequestBuilder extends ReplicationRequestBuilder<IndexRequest,
         return this;
     }

-    /**
-     * Sets a string representation of the {@link #setOpType(org.elasticsearch.action.index.IndexRequest.OpType)}. Can
-     * be either "index" or "create".
-     */
-    public IndexRequestBuilder setOpType(String opType) {
-        request.opType(IndexRequest.OpType.fromString(opType));
-        return this;
-    }
-
     /**
      * Set to <tt>true</tt> to force this index to use {@link org.elasticsearch.action.index.IndexRequest.OpType#CREATE}.
      */

View File

@@ -158,7 +158,7 @@ public class TransportIndexAction extends TransportWriteAction<IndexRequest, Ind
         SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA, shardId.getIndexName(), request.type(), request.id(), request.source())
             .routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl());
-        final Engine.Index operation = indexShard.prepareIndexOnReplica(sourceToParse, request.version(), request.versionType());
+        final Engine.Index operation = indexShard.prepareIndexOnReplica(sourceToParse, request.version(), request.versionType(), request.getAutoGeneratedTimestamp(), request.isRetry());
         Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
         if (update != null) {
             throw new RetryOnReplicaException(shardId, "Mappings are not available on the replica yet, triggered update: " + update);
@@ -171,7 +171,7 @@ public class TransportIndexAction extends TransportWriteAction<IndexRequest, Ind
     public static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard indexShard) {
         SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.PRIMARY, request.index(), request.type(), request.id(), request.source())
             .routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl());
-        return indexShard.prepareIndexOnPrimary(sourceToParse, request.version(), request.versionType());
+        return indexShard.prepareIndexOnPrimary(sourceToParse, request.version(), request.versionType(), request.getAutoGeneratedTimestamp(), request.isRetry());
     }

     public static WriteResult<IndexResponse> executeIndexRequestOnPrimary(IndexRequest request, IndexShard indexShard,

View File

@@ -19,6 +19,8 @@
 package org.elasticsearch.action.ingest;

+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionResponse;
@@ -90,7 +92,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
     void processIndexRequest(Task task, String action, ActionListener listener, ActionFilterChain chain, IndexRequest indexRequest) {

         executionService.executeIndexRequest(indexRequest, t -> {
-            logger.error("failed to execute pipeline [{}]", t, indexRequest.getPipeline());
+            logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to execute pipeline [{}]", indexRequest.getPipeline()), t);
             listener.onFailure(t);
         }, success -> {
             // TransportIndexAction uses IndexRequest and same action name on the node that receives the request and the node that
@@ -105,7 +107,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
         long ingestStartTimeInNanos = System.nanoTime();
         BulkRequestModifier bulkRequestModifier = new BulkRequestModifier(original);
         executionService.executeBulkRequest(() -> bulkRequestModifier, (indexRequest, exception) -> {
-            logger.debug("failed to execute pipeline [{}] for document [{}/{}/{}]", exception, indexRequest.getPipeline(), indexRequest.index(), indexRequest.type(), indexRequest.id());
+            logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to execute pipeline [{}] for document [{}/{}/{}]", indexRequest.getPipeline(), indexRequest.index(), indexRequest.type(), indexRequest.id()), exception);
             bulkRequestModifier.markCurrentItemAsFailed(exception);
         }, (exception) -> {
             if (exception != null) {

View File

@@ -20,8 +20,10 @@
 package org.elasticsearch.action.search;

 import com.carrotsearch.hppc.IntArrayList;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.TopDocs;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.NoShardAvailableActionException;
 import org.elasticsearch.action.support.TransportActions;
@@ -35,7 +37,6 @@ import org.elasticsearch.cluster.routing.ShardIterator;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.util.concurrent.AtomicArray;
 import org.elasticsearch.search.SearchPhaseResult;
 import org.elasticsearch.search.SearchShardTarget;
@@ -46,7 +47,6 @@ import org.elasticsearch.search.internal.InternalSearchResponse;
 import org.elasticsearch.search.internal.ShardSearchTransportRequest;
 import org.elasticsearch.search.query.QuerySearchResult;
 import org.elasticsearch.search.query.QuerySearchResultProvider;
-import org.elasticsearch.search.suggest.Suggest;
 import org.elasticsearch.threadpool.ThreadPool;

 import java.util.List;
@@ -58,7 +58,7 @@ import static org.elasticsearch.action.search.TransportSearchHelper.internalSear
 abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult> extends AbstractAsyncAction {

-    protected final ESLogger logger;
+    protected final Logger logger;
     protected final SearchTransportService searchTransportService;
     private final IndexNameExpressionResolver indexNameExpressionResolver;
     protected final SearchPhaseController searchPhaseController;
@@ -77,7 +77,7 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
     private final Object shardFailuresMutex = new Object();
     protected volatile ScoreDoc[] sortedShardDocs;

-    protected AbstractSearchAsyncAction(ESLogger logger, SearchTransportService searchTransportService, ClusterService clusterService,
+    protected AbstractSearchAsyncAction(Logger logger, SearchTransportService searchTransportService, ClusterService clusterService,
                                         IndexNameExpressionResolver indexNameExpressionResolver,
                                         SearchPhaseController searchPhaseController, ThreadPool threadPool, SearchRequest request,
                                         ActionListener<SearchResponse> listener) {
@@ -191,7 +191,12 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
                 innerMoveToSecondPhase();
             } catch (Exception e) {
                 if (logger.isDebugEnabled()) {
-                    logger.debug("{}: Failed to execute [{}] while moving to second phase", e, shardIt.shardId(), request);
+                    logger.debug(
+                        (Supplier<?>) () -> new ParameterizedMessage(
+                            "{}: Failed to execute [{}] while moving to second phase",
+                            shardIt.shardId(),
+                            request),
+                        e);
                 }
                 raiseEarlyFailure(new ReduceSearchPhaseException(firstPhaseName(), "", e, buildShardFailures()));
             }
@@ -211,15 +216,21 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
         if (totalOps.incrementAndGet() == expectedTotalOps) {
             if (logger.isDebugEnabled()) {
                 if (e != null && !TransportActions.isShardNotAvailableException(e)) {
-                    logger.debug("{}: Failed to execute [{}]", e, shard != null ? shard.shortSummary() : shardIt.shardId(), request);
+                    logger.debug(
+                        (Supplier<?>) () -> new ParameterizedMessage(
+                            "{}: Failed to execute [{}]",
+                            shard != null ? shard.shortSummary() :
+                                shardIt.shardId(),
+                            request),
+                        e);
                 } else if (logger.isTraceEnabled()) {
-                    logger.trace("{}: Failed to execute [{}]", e, shard, request);
+                    logger.trace((Supplier<?>) () -> new ParameterizedMessage("{}: Failed to execute [{}]", shard, request), e);
                 }
             }
             final ShardSearchFailure[] shardSearchFailures = buildShardFailures();
             if (successfulOps.get() == 0) {
                 if (logger.isDebugEnabled()) {
-                    logger.debug("All shards failed for phase: [{}]", e, firstPhaseName());
+                    logger.debug((Supplier<?>) () -> new ParameterizedMessage("All shards failed for phase: [{}]", firstPhaseName()), e);
                 }

                 // no successful ops, raise an exception
@@ -236,10 +247,13 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
             final ShardRouting nextShard = shardIt.nextOrNull();
             final boolean lastShard = nextShard == null;
             // trace log this exception
-            if (logger.isTraceEnabled()) {
-                logger.trace("{}: Failed to execute [{}] lastShard [{}]", e, shard != null ? shard.shortSummary() : shardIt.shardId(),
-                    request, lastShard);
-            }
+            logger.trace(
+                (Supplier<?>) () -> new ParameterizedMessage(
+                    "{}: Failed to execute [{}] lastShard [{}]",
+                    shard != null ? shard.shortSummary() : shardIt.shardId(),
+                    request,
+                    lastShard),
+                e);
             if (!lastShard) {
                 try {
                     performFirstPhase(shardIndex, shardIt, nextShard);
@@ -251,8 +265,14 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
                 // no more shards active, add a failure
                 if (logger.isDebugEnabled() && !logger.isTraceEnabled()) { // do not double log this exception
if (e != null && !TransportActions.isShardNotAvailableException(e)) { if (e != null && !TransportActions.isShardNotAvailableException(e)) {
logger.debug("{}: Failed to execute [{}] lastShard [{}]", e, logger.debug(
shard != null ? shard.shortSummary() : shardIt.shardId(), request, lastShard); (Supplier<?>) () -> new ParameterizedMessage(
"{}: Failed to execute [{}] lastShard [{}]",
shard != null ? shard.shortSummary() :
shardIt.shardId(),
request,
lastShard),
e);
} }
} }
} }
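
The recurring change in the hunks above is the move from ESLogger to the Log4j 2 Logger: the old API took the exception before the message parameters, while Log4j 2 takes the exception last and defers message construction behind a Supplier so the ParameterizedMessage is only built when the level is enabled. A minimal, self-contained sketch of that idiom (the class and method names are illustrative; the message text echoes the hunks above):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

public class LazyLoggingSketch {
    private static final Logger logger = LogManager.getLogger(LazyLoggingSketch.class);

    void onShardFailure(String shardSummary, String request, Exception e) {
        // The lambda runs only if debug is enabled, so the formatted message
        // is never built on the hot path; the exception goes last, which is
        // how Log4j 2 attaches the stack trace.
        logger.debug(
            (Supplier<?>) () -> new ParameterizedMessage(
                "{}: Failed to execute [{}]", shardSummary, request),
            e);
    }
}

The cast to Supplier<?> disambiguates this overload from debug(Object, Throwable), which is why it appears on every converted call site above.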

View File

@ -19,12 +19,14 @@
package org.elasticsearch.action.search; package org.elasticsearch.action.search;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchTransportService; import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController; import org.elasticsearch.search.controller.SearchPhaseController;
@ -43,7 +45,7 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<DfsSea
private final AtomicArray<QueryFetchSearchResult> queryFetchResults; private final AtomicArray<QueryFetchSearchResult> queryFetchResults;
SearchDfsQueryAndFetchAsyncAction(ESLogger logger, SearchTransportService searchTransportService, SearchDfsQueryAndFetchAsyncAction(Logger logger, SearchTransportService searchTransportService,
ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
SearchPhaseController searchPhaseController, ThreadPool threadPool, SearchPhaseController searchPhaseController, ThreadPool threadPool,
SearchRequest request, ActionListener<SearchResponse> listener) { SearchRequest request, ActionListener<SearchResponse> listener) {
@ -105,7 +107,7 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<DfsSea
void onSecondPhaseFailure(Exception e, QuerySearchRequest querySearchRequest, int shardIndex, DfsSearchResult dfsResult, void onSecondPhaseFailure(Exception e, QuerySearchRequest querySearchRequest, int shardIndex, DfsSearchResult dfsResult,
AtomicInteger counter) { AtomicInteger counter) {
if (logger.isDebugEnabled()) { if (logger.isDebugEnabled()) {
logger.debug("[{}] Failed to execute query phase", e, querySearchRequest.id()); logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute query phase", querySearchRequest.id()), e);
} }
this.addShardFailure(shardIndex, dfsResult.shardTarget(), e); this.addShardFailure(shardIndex, dfsResult.shardTarget(), e);
successfulOps.decrementAndGet(); successfulOps.decrementAndGet();

View File

@ -20,13 +20,15 @@
package org.elasticsearch.action.search; package org.elasticsearch.action.search;
import com.carrotsearch.hppc.IntArrayList; import com.carrotsearch.hppc.IntArrayList;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.action.SearchTransportService; import org.elasticsearch.search.action.SearchTransportService;
@ -50,7 +52,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSe
final AtomicArray<FetchSearchResult> fetchResults; final AtomicArray<FetchSearchResult> fetchResults;
final AtomicArray<IntArrayList> docIdsToLoad; final AtomicArray<IntArrayList> docIdsToLoad;
SearchDfsQueryThenFetchAsyncAction(ESLogger logger, SearchTransportService searchTransportService, SearchDfsQueryThenFetchAsyncAction(Logger logger, SearchTransportService searchTransportService,
ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
SearchPhaseController searchPhaseController, ThreadPool threadPool, SearchPhaseController searchPhaseController, ThreadPool threadPool,
SearchRequest request, ActionListener<SearchResponse> listener) { SearchRequest request, ActionListener<SearchResponse> listener) {
@ -113,7 +115,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSe
void onQueryFailure(Exception e, QuerySearchRequest querySearchRequest, int shardIndex, DfsSearchResult dfsResult, void onQueryFailure(Exception e, QuerySearchRequest querySearchRequest, int shardIndex, DfsSearchResult dfsResult,
AtomicInteger counter) { AtomicInteger counter) {
if (logger.isDebugEnabled()) { if (logger.isDebugEnabled()) {
logger.debug("[{}] Failed to execute query phase", e, querySearchRequest.id()); logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute query phase", querySearchRequest.id()), e);
} }
this.addShardFailure(shardIndex, dfsResult.shardTarget(), e); this.addShardFailure(shardIndex, dfsResult.shardTarget(), e);
successfulOps.decrementAndGet(); successfulOps.decrementAndGet();
@ -182,7 +184,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSe
void onFetchFailure(Exception e, ShardFetchSearchRequest fetchSearchRequest, int shardIndex, void onFetchFailure(Exception e, ShardFetchSearchRequest fetchSearchRequest, int shardIndex,
SearchShardTarget shardTarget, AtomicInteger counter) { SearchShardTarget shardTarget, AtomicInteger counter) {
if (logger.isDebugEnabled()) { if (logger.isDebugEnabled()) {
logger.debug("[{}] Failed to execute fetch phase", e, fetchSearchRequest.id()); logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute fetch phase", fetchSearchRequest.id()), e);
} }
this.addShardFailure(shardIndex, shardTarget, e); this.addShardFailure(shardIndex, shardTarget, e);
successfulOps.decrementAndGet(); successfulOps.decrementAndGet();

View File

@ -19,12 +19,12 @@
package org.elasticsearch.action.search; package org.elasticsearch.action.search;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.search.action.SearchTransportService; import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController; import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.QueryFetchSearchResult; import org.elasticsearch.search.fetch.QueryFetchSearchResult;
@ -36,7 +36,7 @@ import java.io.IOException;
class SearchQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<QueryFetchSearchResult> { class SearchQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<QueryFetchSearchResult> {
SearchQueryAndFetchAsyncAction(ESLogger logger, SearchTransportService searchTransportService, SearchQueryAndFetchAsyncAction(Logger logger, SearchTransportService searchTransportService,
ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
SearchPhaseController searchPhaseController, ThreadPool threadPool, SearchPhaseController searchPhaseController, ThreadPool threadPool,
SearchRequest request, ActionListener<SearchResponse> listener) { SearchRequest request, ActionListener<SearchResponse> listener) {

View File

@ -20,13 +20,15 @@
package org.elasticsearch.action.search; package org.elasticsearch.action.search;
import com.carrotsearch.hppc.IntArrayList; import com.carrotsearch.hppc.IntArrayList;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.action.SearchTransportService; import org.elasticsearch.search.action.SearchTransportService;
@ -46,7 +48,7 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<QuerySea
final AtomicArray<FetchSearchResult> fetchResults; final AtomicArray<FetchSearchResult> fetchResults;
final AtomicArray<IntArrayList> docIdsToLoad; final AtomicArray<IntArrayList> docIdsToLoad;
SearchQueryThenFetchAsyncAction(ESLogger logger, SearchTransportService searchService, SearchQueryThenFetchAsyncAction(Logger logger, SearchTransportService searchService,
ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
SearchPhaseController searchPhaseController, ThreadPool threadPool, SearchPhaseController searchPhaseController, ThreadPool threadPool,
SearchRequest request, ActionListener<SearchResponse> listener) { SearchRequest request, ActionListener<SearchResponse> listener) {
@ -115,7 +117,7 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<QuerySea
void onFetchFailure(Exception e, ShardFetchSearchRequest fetchSearchRequest, int shardIndex, SearchShardTarget shardTarget, void onFetchFailure(Exception e, ShardFetchSearchRequest fetchSearchRequest, int shardIndex, SearchShardTarget shardTarget,
AtomicInteger counter) { AtomicInteger counter) {
if (logger.isDebugEnabled()) { if (logger.isDebugEnabled()) {
logger.debug("[{}] Failed to execute fetch phase", e, fetchSearchRequest.id()); logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute fetch phase", fetchSearchRequest.id()), e);
} }
this.addShardFailure(shardIndex, shardTarget, e); this.addShardFailure(shardIndex, shardTarget, e);
successfulOps.decrementAndGet(); successfulOps.decrementAndGet();

View File

@ -19,12 +19,14 @@
package org.elasticsearch.action.search; package org.elasticsearch.action.search;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchTransportService; import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController; import org.elasticsearch.search.controller.SearchPhaseController;
@ -40,7 +42,7 @@ import static org.elasticsearch.action.search.TransportSearchHelper.internalScro
class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction { class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {
private final ESLogger logger; private final Logger logger;
private final SearchPhaseController searchPhaseController; private final SearchPhaseController searchPhaseController;
private final SearchTransportService searchTransportService; private final SearchTransportService searchTransportService;
private final SearchScrollRequest request; private final SearchScrollRequest request;
@ -52,7 +54,7 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {
private final AtomicInteger successfulOps; private final AtomicInteger successfulOps;
private final AtomicInteger counter; private final AtomicInteger counter;
SearchScrollQueryAndFetchAsyncAction(ESLogger logger, ClusterService clusterService, SearchScrollQueryAndFetchAsyncAction(Logger logger, ClusterService clusterService,
SearchTransportService searchTransportService, SearchPhaseController searchPhaseController, SearchTransportService searchTransportService, SearchPhaseController searchPhaseController,
SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) { SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
this.logger = logger; this.logger = logger;
@ -146,7 +148,7 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {
private void onPhaseFailure(Exception e, long searchId, int shardIndex) { private void onPhaseFailure(Exception e, long searchId, int shardIndex) {
if (logger.isDebugEnabled()) { if (logger.isDebugEnabled()) {
logger.debug("[{}] Failed to execute query phase", e, searchId); logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute query phase", searchId), e);
} }
addShardFailure(shardIndex, new ShardSearchFailure(e)); addShardFailure(shardIndex, new ShardSearchFailure(e));
successfulOps.decrementAndGet(); successfulOps.decrementAndGet();

View File

@ -20,12 +20,14 @@
package org.elasticsearch.action.search; package org.elasticsearch.action.search;
import com.carrotsearch.hppc.IntArrayList; import com.carrotsearch.hppc.IntArrayList;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchTransportService; import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController; import org.elasticsearch.search.controller.SearchPhaseController;
@ -43,7 +45,7 @@ import static org.elasticsearch.action.search.TransportSearchHelper.internalScro
class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction { class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
private final ESLogger logger; private final Logger logger;
private final SearchTransportService searchTransportService; private final SearchTransportService searchTransportService;
private final SearchPhaseController searchPhaseController; private final SearchPhaseController searchPhaseController;
private final SearchScrollRequest request; private final SearchScrollRequest request;
@ -56,7 +58,7 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
private volatile ScoreDoc[] sortedShardDocs; private volatile ScoreDoc[] sortedShardDocs;
private final AtomicInteger successfulOps; private final AtomicInteger successfulOps;
SearchScrollQueryThenFetchAsyncAction(ESLogger logger, ClusterService clusterService, SearchScrollQueryThenFetchAsyncAction(Logger logger, ClusterService clusterService,
SearchTransportService searchTransportService, SearchPhaseController searchPhaseController, SearchTransportService searchTransportService, SearchPhaseController searchPhaseController,
SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) { SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
this.logger = logger; this.logger = logger;
@ -146,7 +148,7 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
void onQueryPhaseFailure(final int shardIndex, final AtomicInteger counter, final long searchId, Exception failure) { void onQueryPhaseFailure(final int shardIndex, final AtomicInteger counter, final long searchId, Exception failure) {
if (logger.isDebugEnabled()) { if (logger.isDebugEnabled()) {
logger.debug("[{}] Failed to execute query phase", failure, searchId); logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute query phase", searchId), failure);
} }
addShardFailure(shardIndex, new ShardSearchFailure(failure)); addShardFailure(shardIndex, new ShardSearchFailure(failure));
successfulOps.decrementAndGet(); successfulOps.decrementAndGet();

View File

@ -19,6 +19,8 @@
package org.elasticsearch.action.search; package org.elasticsearch.action.search;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.HandledTransportAction;
@ -144,7 +146,7 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScro
} }
void onFailedFreedContext(Throwable e, DiscoveryNode node) { void onFailedFreedContext(Throwable e, DiscoveryNode node) {
logger.warn("Clear SC failed on node[{}]", e, node); logger.warn((Supplier<?>) () -> new ParameterizedMessage("Clear SC failed on node[{}]", node), e);
if (expectedOps.countDown()) { if (expectedOps.countDown()) {
listener.onResponse(new ClearScrollResponse(false, numberOfFreedSearchContexts.get())); listener.onResponse(new ClearScrollResponse(false, numberOfFreedSearchContexts.get()));
} else { } else {

View File

@ -19,9 +19,9 @@
package org.elasticsearch.action.support; package org.elasticsearch.action.support;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ListenableActionFuture; import org.elasticsearch.action.ListenableActionFuture;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
@ -33,7 +33,7 @@ import java.util.List;
*/ */
public abstract class AbstractListenableActionFuture<T, L> extends AdapterActionFuture<T, L> implements ListenableActionFuture<T> { public abstract class AbstractListenableActionFuture<T, L> extends AdapterActionFuture<T, L> implements ListenableActionFuture<T> {
private static final ESLogger logger = Loggers.getLogger(AbstractListenableActionFuture.class); private static final Logger logger = Loggers.getLogger(AbstractListenableActionFuture.class);
final ThreadPool threadPool; final ThreadPool threadPool;
volatile Object listeners; volatile Object listeners;

View File

@ -24,8 +24,8 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
@ -41,16 +41,17 @@ import java.util.List;
public final class AutoCreateIndex { public final class AutoCreateIndex {
public static final Setting<AutoCreate> AUTO_CREATE_INDEX_SETTING = public static final Setting<AutoCreate> AUTO_CREATE_INDEX_SETTING =
new Setting<>("action.auto_create_index", "true", AutoCreate::new, Property.NodeScope); new Setting<>("action.auto_create_index", "true", AutoCreate::new, Property.NodeScope, Setting.Property.Dynamic);
private final boolean dynamicMappingDisabled; private final boolean dynamicMappingDisabled;
private final IndexNameExpressionResolver resolver; private final IndexNameExpressionResolver resolver;
private final AutoCreate autoCreate; private volatile AutoCreate autoCreate;
public AutoCreateIndex(Settings settings, IndexNameExpressionResolver resolver) { public AutoCreateIndex(Settings settings, ClusterSettings clusterSettings, IndexNameExpressionResolver resolver) {
this.resolver = resolver; this.resolver = resolver;
dynamicMappingDisabled = !MapperService.INDEX_MAPPER_DYNAMIC_SETTING.get(settings); dynamicMappingDisabled = !MapperService.INDEX_MAPPER_DYNAMIC_SETTING.get(settings);
this.autoCreate = AUTO_CREATE_INDEX_SETTING.get(settings); this.autoCreate = AUTO_CREATE_INDEX_SETTING.get(settings);
clusterSettings.addSettingsUpdateConsumer(AUTO_CREATE_INDEX_SETTING, this::setAutoCreate);
} }
/** /**
@ -64,6 +65,8 @@ public final class AutoCreateIndex {
* Should the index be auto created? * Should the index be auto created?
*/ */
public boolean shouldAutoCreate(String index, ClusterState state) { public boolean shouldAutoCreate(String index, ClusterState state) {
// One volatile read, so that all checks are done against the same instance:
final AutoCreate autoCreate = this.autoCreate;
if (autoCreate.autoCreateIndex == false) { if (autoCreate.autoCreateIndex == false) {
return false; return false;
} }
@ -87,7 +90,15 @@ public final class AutoCreateIndex {
return false; return false;
} }
private static class AutoCreate { AutoCreate getAutoCreate() {
return autoCreate;
}
void setAutoCreate(AutoCreate autoCreate) {
this.autoCreate = autoCreate;
}
static class AutoCreate {
private final boolean autoCreateIndex; private final boolean autoCreateIndex;
private final List<Tuple<String, Boolean>> expressions; private final List<Tuple<String, Boolean>> expressions;
@ -128,5 +139,13 @@ public final class AutoCreateIndex {
this.expressions = expressions; this.expressions = expressions;
this.autoCreateIndex = autoCreateIndex; this.autoCreateIndex = autoCreateIndex;
} }
boolean isAutoCreateIndex() {
return autoCreateIndex;
}
List<Tuple<String, Boolean>> getExpressions() {
return expressions;
}
} }
} }
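
The AutoCreateIndex change above is the standard pattern for making a node setting updatable at runtime: mark it Setting.Property.Dynamic, keep the parsed value in a volatile field, register an update consumer against ClusterSettings, and read the field once per decision. A sketch of that pattern under an assumed setting name (example.flag and the class are illustrative, not from the diff):

import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;

public class DynamicFlagSketch {
    // Illustrative dynamic setting; mirrors the shape of AUTO_CREATE_INDEX_SETTING.
    static final Setting<Boolean> FLAG_SETTING =
        Setting.boolSetting("example.flag", true, Property.NodeScope, Property.Dynamic);

    private volatile boolean flag;

    public DynamicFlagSketch(Settings settings, ClusterSettings clusterSettings) {
        this.flag = FLAG_SETTING.get(settings);
        // Fires whenever a cluster settings update touches the key.
        clusterSettings.addSettingsUpdateConsumer(FLAG_SETTING, v -> this.flag = v);
    }

    boolean isEnabled() {
        // A single volatile read per decision, matching the
        // "One volatile read" comment in shouldAutoCreate above.
        return flag;
    }
}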

View File

@ -18,6 +18,7 @@
*/ */
package org.elasticsearch.action.support; package org.elasticsearch.action.support;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionResponse;
@ -75,8 +76,13 @@ public abstract class HandledTransportAction<Request extends ActionRequest<Reque
try { try {
channel.sendResponse(e); channel.sendResponse(e);
} catch (Exception e1) { } catch (Exception e1) {
logger.warn("Failed to send error response for action [{}] and request [{}]", e1, logger.warn(
actionName, request); (org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage(
"Failed to send error response for action [{}] and request [{}]",
actionName,
request),
e1);
} }
} }
}); });

View File

@ -19,10 +19,12 @@
package org.elasticsearch.action.support; package org.elasticsearch.action.support;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Client; import org.elasticsearch.client.Client;
import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
@ -39,12 +41,12 @@ public final class ThreadedActionListener<Response> implements ActionListener<Re
*/ */
public static class Wrapper { public static class Wrapper {
private final ESLogger logger; private final Logger logger;
private final ThreadPool threadPool; private final ThreadPool threadPool;
private final boolean threadedListener; private final boolean threadedListener;
public Wrapper(ESLogger logger, Settings settings, ThreadPool threadPool) { public Wrapper(Logger logger, Settings settings, ThreadPool threadPool) {
this.logger = logger; this.logger = logger;
this.threadPool = threadPool; this.threadPool = threadPool;
// Should the action listener be threaded or not by default. Action listeners are automatically threaded for // Should the action listener be threaded or not by default. Action listeners are automatically threaded for
@ -68,13 +70,13 @@ public final class ThreadedActionListener<Response> implements ActionListener<Re
} }
} }
private final ESLogger logger; private final Logger logger;
private final ThreadPool threadPool; private final ThreadPool threadPool;
private final String executor; private final String executor;
private final ActionListener<Response> listener; private final ActionListener<Response> listener;
private final boolean forceExecution; private final boolean forceExecution;
public ThreadedActionListener(ESLogger logger, ThreadPool threadPool, String executor, ActionListener<Response> listener, public ThreadedActionListener(Logger logger, ThreadPool threadPool, String executor, ActionListener<Response> listener,
boolean forceExecution) { boolean forceExecution) {
this.logger = logger; this.logger = logger;
this.threadPool = threadPool; this.threadPool = threadPool;
@ -118,7 +120,8 @@ public final class ThreadedActionListener<Response> implements ActionListener<Re
@Override @Override
public void onFailure(Exception e) { public void onFailure(Exception e) {
logger.warn("failed to execute failure callback on [{}], failure [{}]", e, listener, e); logger.warn(
(Supplier<?>) () -> new ParameterizedMessage("failed to execute failure callback on [{}]", listener), e);
} }
}); });
} }

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.support; package org.elasticsearch.action.support;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequest;
@ -27,7 +28,6 @@ import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskListener; import org.elasticsearch.tasks.TaskListener;
@ -165,9 +165,9 @@ public abstract class TransportAction<Request extends ActionRequest<Request>, Re
private final TransportAction<Request, Response> action; private final TransportAction<Request, Response> action;
private final AtomicInteger index = new AtomicInteger(); private final AtomicInteger index = new AtomicInteger();
private final ESLogger logger; private final Logger logger;
private RequestFilterChain(TransportAction<Request, Response> action, ESLogger logger) { private RequestFilterChain(TransportAction<Request, Response> action, Logger logger) {
this.action = action; this.action = action;
this.logger = logger; this.logger = logger;
} }
@ -201,9 +201,9 @@ public abstract class TransportAction<Request extends ActionRequest<Request>, Re
private final ActionFilter[] filters; private final ActionFilter[] filters;
private final AtomicInteger index; private final AtomicInteger index;
private final ESLogger logger; private final Logger logger;
private ResponseFilterChain(ActionFilter[] filters, ESLogger logger) { private ResponseFilterChain(ActionFilter[] filters, Logger logger) {
this.filters = filters; this.filters = filters;
this.index = new AtomicInteger(filters.length); this.index = new AtomicInteger(filters.length);
this.logger = logger; this.logger = logger;

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.support.broadcast; package org.elasticsearch.action.support.broadcast;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.NoShardAvailableActionException;
import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ActionFilters;
@ -37,10 +38,10 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportService;
import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicInteger;
@ -224,7 +225,13 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest<
if (e != null) { if (e != null) {
if (logger.isTraceEnabled()) { if (logger.isTraceEnabled()) {
if (!TransportActions.isShardNotAvailableException(e)) { if (!TransportActions.isShardNotAvailableException(e)) {
logger.trace("{}: failed to execute [{}]", e, shard != null ? shard.shortSummary() : shardIt.shardId(), request); logger.trace(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage(
"{}: failed to execute [{}]",
shard != null ? shard.shortSummary() : shardIt.shardId(),
request),
e);
} }
} }
} }
@ -233,7 +240,13 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest<
if (logger.isDebugEnabled()) { if (logger.isDebugEnabled()) {
if (e != null) { if (e != null) {
if (!TransportActions.isShardNotAvailableException(e)) { if (!TransportActions.isShardNotAvailableException(e)) {
logger.debug("{}: failed to execute [{}]", e, shard != null ? shard.shortSummary() : shardIt.shardId(), request); logger.debug(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage(
"{}: failed to execute [{}]",
shard != null ? shard.shortSummary() : shardIt.shardId(),
request),
e);
} }
} }
} }

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.support.broadcast.node; package org.elasticsearch.action.support.broadcast.node;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.IndicesRequest;
@ -46,13 +47,13 @@ import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.NodeShouldNotConnectException; import org.elasticsearch.transport.NodeShouldNotConnectException;
import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportService;
import java.io.IOException; import java.io.IOException;
@ -363,7 +364,9 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
protected void onNodeFailure(DiscoveryNode node, int nodeIndex, Throwable t) { protected void onNodeFailure(DiscoveryNode node, int nodeIndex, Throwable t) {
String nodeId = node.getId(); String nodeId = node.getId();
if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) { if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) {
logger.debug("failed to execute [{}] on node [{}]", t, actionName, nodeId); logger.debug(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage("failed to execute [{}] on node [{}]", actionName, nodeId), t);
} }
// this is defensive to protect against the possibility of double invocation // this is defensive to protect against the possibility of double invocation
@ -441,11 +444,23 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
shardResults[shardIndex] = failure; shardResults[shardIndex] = failure;
if (TransportActions.isShardNotAvailableException(e)) { if (TransportActions.isShardNotAvailableException(e)) {
if (logger.isTraceEnabled()) { if (logger.isTraceEnabled()) {
logger.trace("[{}] failed to execute operation for shard [{}]", e, actionName, shardRouting.shortSummary()); logger.trace(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage(
"[{}] failed to execute operation for shard [{}]",
actionName,
shardRouting.shortSummary()),
e);
} }
} else { } else {
if (logger.isDebugEnabled()) { if (logger.isDebugEnabled()) {
logger.debug("[{}] failed to execute operation for shard [{}]", e, actionName, shardRouting.shortSummary()); logger.debug(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage(
"[{}] failed to execute operation for shard [{}]",
actionName,
shardRouting.shortSummary()),
e);
} }
} }
} }

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.support.master; package org.elasticsearch.action.support.master;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.ActionListenerResponseHandler;
import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionResponse;
@ -155,7 +156,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
public void onFailure(Exception t) { public void onFailure(Exception t) {
if (t instanceof Discovery.FailedToCommitClusterStateException if (t instanceof Discovery.FailedToCommitClusterStateException
|| (t instanceof NotMasterException)) { || (t instanceof NotMasterException)) {
logger.debug("master could not publish cluster state or stepped down before publishing action [{}], scheduling a retry", t, actionName); logger.debug((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("master could not publish cluster state or stepped down before publishing action [{}], scheduling a retry", actionName), t);
retry(t, MasterNodeChangePredicate.INSTANCE); retry(t, MasterNodeChangePredicate.INSTANCE);
} else { } else {
listener.onFailure(t); listener.onFailure(t);
@ -209,7 +210,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
@Override @Override
public void onTimeout(TimeValue timeout) { public void onTimeout(TimeValue timeout) {
logger.debug("timed out while retrying [{}] after failure (timeout [{}])", failure, actionName, timeout); logger.debug((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("timed out while retrying [{}] after failure (timeout [{}])", actionName, timeout), failure);
listener.onFailure(new MasterNotDiscoveredException(failure)); listener.onFailure(new MasterNotDiscoveredException(failure));
} }
}, changePredicate }, changePredicate

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.support.nodes; package org.elasticsearch.action.support.nodes;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.NoSuchNodeException; import org.elasticsearch.action.NoSuchNodeException;
@ -31,13 +32,13 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.NodeShouldNotConnectException; import org.elasticsearch.transport.NodeShouldNotConnectException;
import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportService;
import java.util.ArrayList; import java.util.ArrayList;
@ -238,7 +239,9 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
private void onFailure(int idx, String nodeId, Throwable t) { private void onFailure(int idx, String nodeId, Throwable t) {
if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) { if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) {
logger.debug("failed to execute on node [{}]", t, nodeId); logger.debug(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage("failed to execute on node [{}]", nodeId), t);
} }
if (accumulateExceptions()) { if (accumulateExceptions()) {
responses.set(idx, new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t)); responses.set(idx, new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t));

View File

@ -18,6 +18,8 @@
*/ */
package org.elasticsearch.action.support.replication; package org.elasticsearch.action.support.replication;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
@ -31,7 +33,6 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardId;
@ -56,7 +57,7 @@ public class ReplicationOperation<
ReplicaRequest extends ReplicationRequest<ReplicaRequest>, ReplicaRequest extends ReplicationRequest<ReplicaRequest>,
PrimaryResultT extends ReplicationOperation.PrimaryResult<ReplicaRequest> PrimaryResultT extends ReplicationOperation.PrimaryResult<ReplicaRequest>
> { > {
private final ESLogger logger; private final Logger logger;
private final Request request; private final Request request;
private final Supplier<ClusterState> clusterStateSupplier; private final Supplier<ClusterState> clusterStateSupplier;
private final String opType; private final String opType;
@ -86,7 +87,7 @@ public class ReplicationOperation<
public ReplicationOperation(Request request, Primary<Request, ReplicaRequest, PrimaryResultT> primary, public ReplicationOperation(Request request, Primary<Request, ReplicaRequest, PrimaryResultT> primary,
ActionListener<PrimaryResultT> listener, ActionListener<PrimaryResultT> listener,
boolean executeOnReplicas, Replicas<ReplicaRequest> replicas, boolean executeOnReplicas, Replicas<ReplicaRequest> replicas,
Supplier<ClusterState> clusterStateSupplier, ESLogger logger, String opType) { Supplier<ClusterState> clusterStateSupplier, Logger logger, String opType) {
this.executeOnReplicas = executeOnReplicas; this.executeOnReplicas = executeOnReplicas;
this.replicasProxy = replicas; this.replicasProxy = replicas;
this.primary = primary; this.primary = primary;
@ -189,8 +190,14 @@ public class ReplicationOperation<
@Override @Override
public void onFailure(Exception replicaException) { public void onFailure(Exception replicaException) {
logger.trace("[{}] failure while performing [{}] on replica {}, request [{}]", replicaException, shard.shardId(), opType, logger.trace(
shard, replicaRequest); (org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
"[{}] failure while performing [{}] on replica {}, request [{}]",
shard.shardId(),
opType,
shard,
replicaRequest),
replicaException);
if (ignoreReplicaException(replicaException)) { if (ignoreReplicaException(replicaException)) {
decPendingAndFinishIfNeeded(); decPendingAndFinishIfNeeded();
} else { } else {
@ -198,7 +205,9 @@ public class ReplicationOperation<
shardReplicaFailures.add(new ReplicationResponse.ShardInfo.Failure( shardReplicaFailures.add(new ReplicationResponse.ShardInfo.Failure(
shard.shardId(), shard.currentNodeId(), replicaException, restStatus, false)); shard.shardId(), shard.currentNodeId(), replicaException, restStatus, false));
String message = String.format(Locale.ROOT, "failed to perform %s on replica %s", opType, shard); String message = String.format(Locale.ROOT, "failed to perform %s on replica %s", opType, shard);
logger.warn("[{}] {}", replicaException, shard.shardId(), message); logger.warn(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage("[{}] {}", shard.shardId(), message), replicaException);
replicasProxy.failShard(shard, replicaRequest.primaryTerm(), message, replicaException, replicasProxy.failShard(shard, replicaRequest.primaryTerm(), message, replicaException,
ReplicationOperation.this::decPendingAndFinishIfNeeded, ReplicationOperation.this::decPendingAndFinishIfNeeded,
ReplicationOperation.this::onPrimaryDemoted, ReplicationOperation.this::onPrimaryDemoted,

View File

@ -248,4 +248,12 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
public String getDescription() { public String getDescription() {
return toString(); return toString();
} }
/**
* This method is called before this replication request is retried
* the first time.
*/
public void onRetry() {
// nothing by default
}
} }
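
The new onRetry() hook gives concrete replication requests a place to react before they are re-sent. A hedged sketch of a subclass using it (the class and the retried flag are illustrative; this diff only adds the empty default):

import org.elasticsearch.action.support.replication.ReplicationRequest;

public class ExampleReplicationRequest extends ReplicationRequest<ExampleReplicationRequest> {
    private boolean retried;

    @Override
    public void onRetry() {
        // Per the javadoc above, this runs before the first retry is sent.
        retried = true;
    }

    public boolean isRetried() {
        return retried;
    }
}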

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.support.replication; package org.elasticsearch.action.support.replication;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.ActionListenerResponseHandler;
@ -37,10 +38,12 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.AllocationId;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
@ -52,22 +55,26 @@ import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardNotFoundException;
import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.node.NodeClosedException;
import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportChannelResponseHandler; import org.elasticsearch.transport.TransportChannelResponseHandler;
import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportResponse.Empty; import org.elasticsearch.transport.TransportResponse.Empty;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportService;
import java.io.IOException; import java.io.IOException;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer; import java.util.function.Consumer;
import java.util.function.Supplier; import java.util.function.Supplier;
@ -114,9 +121,12 @@ public abstract class TransportReplicationAction<
this.transportPrimaryAction = actionName + "[p]"; this.transportPrimaryAction = actionName + "[p]";
this.transportReplicaAction = actionName + "[r]"; this.transportReplicaAction = actionName + "[r]";
transportService.registerRequestHandler(actionName, request, ThreadPool.Names.SAME, new OperationTransportHandler()); transportService.registerRequestHandler(actionName, request, ThreadPool.Names.SAME, new OperationTransportHandler());
transportService.registerRequestHandler(transportPrimaryAction, request, executor, new PrimaryOperationTransportHandler()); transportService.registerRequestHandler(transportPrimaryAction, () -> new ConcreteShardRequest<>(request), executor,
new PrimaryOperationTransportHandler());
// we must never reject requests because of thread pool capacity on replicas // we must never reject requests because of thread pool capacity on replicas
transportService.registerRequestHandler(transportReplicaAction, replicaRequest, executor, true, true, transportService.registerRequestHandler(transportReplicaAction,
() -> new ConcreteShardRequest<>(replicaRequest),
executor, true, true,
new ReplicaOperationTransportHandler()); new ReplicaOperationTransportHandler());
this.transportOptions = transportOptions(); this.transportOptions = transportOptions();
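
The registration change above replaces a shared request prototype with a factory lambda, so every inbound message gets a fresh object to deserialize into. A minimal sketch of that idea, using a hypothetical HandlerRegistry rather than the real TransportService API:

import java.util.HashMap;
import java.util.Map;
import java.util.function.Supplier;

// Hypothetical registry: each action name maps to a factory that produces a
// fresh, empty request object for every inbound message to deserialize into.
final class HandlerRegistry<T> {
    private final Map<String, Supplier<T>> factories = new HashMap<>();

    void register(String action, Supplier<T> requestFactory) {
        factories.put(action, requestFactory);
    }

    T newRequest(String action) {
        // a new instance per message; no shared mutable prototype
        return factories.get(action).get();
    }
}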
@@ -162,7 +172,7 @@ public abstract class TransportReplicationAction<
    /**
     * Synchronous replica operation on nodes with replica copies. This is done under the lock from
-     * {@link #acquireReplicaOperationLock(ShardId, long, ActionListener)}.
+     * {@link #acquireReplicaOperationLock(ShardId, long, String, ActionListener)}.
     */
    protected abstract ReplicaResult shardOperationOnReplica(ReplicaRequest shardRequest);
@@ -215,7 +225,9 @@ public abstract class TransportReplicationAction<
                channel.sendResponse(e);
            } catch (Exception inner) {
                inner.addSuppressed(e);
-                logger.warn("Failed to send response for {}", inner, actionName);
+                logger.warn(
+                    (org.apache.logging.log4j.util.Supplier<?>)
+                        () -> new ParameterizedMessage("Failed to send response for {}", actionName), inner);
            }
        }
    });
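
The logging rewrites that follow this shape throughout the commit exist because the old ESLogger methods took the exception before the format arguments, while Log4j 2 would swallow a trailing exception as just another format argument. Casting the lambda to Supplier<?> selects the (Supplier, Throwable) overload and also defers building the message until the level is enabled. A small self-contained example against the plain Log4j 2 API:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

class LazyLoggingExample {
    private static final Logger logger = LogManager.getLogger(LazyLoggingExample.class);

    void onFailure(String action, Exception e) {
        // The cast picks Logger#warn(Supplier<?>, Throwable): the message is
        // only built if WARN is enabled, and the exception is logged as a
        // throwable instead of being consumed as a format argument.
        logger.warn(
            (Supplier<?>) () -> new ParameterizedMessage("failed to execute [{}]", action), e);
    }
}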
@@ -227,33 +239,36 @@ public abstract class TransportReplicationAction<
        }
    }

-    class PrimaryOperationTransportHandler implements TransportRequestHandler<Request> {
+    class PrimaryOperationTransportHandler implements TransportRequestHandler<ConcreteShardRequest<Request>> {

        @Override
-        public void messageReceived(final Request request, final TransportChannel channel) throws Exception {
+        public void messageReceived(final ConcreteShardRequest<Request> request, final TransportChannel channel) throws Exception {
            throw new UnsupportedOperationException("the task parameter is required for this operation");
        }

        @Override
-        public void messageReceived(Request request, TransportChannel channel, Task task) {
-            new AsyncPrimaryAction(request, channel, (ReplicationTask) task).run();
+        public void messageReceived(ConcreteShardRequest<Request> request, TransportChannel channel, Task task) {
+            new AsyncPrimaryAction(request.request, request.targetAllocationID, channel, (ReplicationTask) task).run();
        }
    }

    class AsyncPrimaryAction extends AbstractRunnable implements ActionListener<PrimaryShardReference> {

        private final Request request;
+        /** targetAllocationID of the shard this request is meant for */
+        private final String targetAllocationID;
        private final TransportChannel channel;
        private final ReplicationTask replicationTask;

-        AsyncPrimaryAction(Request request, TransportChannel channel, ReplicationTask replicationTask) {
+        AsyncPrimaryAction(Request request, String targetAllocationID, TransportChannel channel, ReplicationTask replicationTask) {
            this.request = request;
+            this.targetAllocationID = targetAllocationID;
            this.channel = channel;
            this.replicationTask = replicationTask;
        }

        @Override
        protected void doRun() throws Exception {
-            acquirePrimaryShardReference(request.shardId(), this);
+            acquirePrimaryShardReference(request.shardId(), targetAllocationID, this);
        }

        @Override
@@ -268,7 +283,9 @@ public abstract class TransportReplicationAction<
            final ShardRouting primary = primaryShardReference.routingEntry();
            assert primary.relocating() : "indexShard is marked as relocated but routing isn't" + primary;
            DiscoveryNode relocatingNode = clusterService.state().nodes().get(primary.relocatingNodeId());
-            transportService.sendRequest(relocatingNode, transportPrimaryAction, request, transportOptions,
+            transportService.sendRequest(relocatingNode, transportPrimaryAction,
+                new ConcreteShardRequest<>(request, primary.allocationId().getRelocationId()),
+                transportOptions,
                new TransportChannelResponseHandler<Response>(logger, channel, "rerouting indexing to target primary " + primary,
                    TransportReplicationAction.this::newResponseInstance) {
@@ -388,15 +405,17 @@ public abstract class TransportReplicationAction<
        }
    }

-    class ReplicaOperationTransportHandler implements TransportRequestHandler<ReplicaRequest> {
+    class ReplicaOperationTransportHandler implements TransportRequestHandler<ConcreteShardRequest<ReplicaRequest>> {

        @Override
-        public void messageReceived(final ReplicaRequest request, final TransportChannel channel) throws Exception {
+        public void messageReceived(final ConcreteShardRequest<ReplicaRequest> request, final TransportChannel channel)
+            throws Exception {
            throw new UnsupportedOperationException("the task parameter is required for this operation");
        }

        @Override
-        public void messageReceived(ReplicaRequest request, TransportChannel channel, Task task) throws Exception {
-            new AsyncReplicaAction(request, channel, (ReplicationTask) task).run();
+        public void messageReceived(ConcreteShardRequest<ReplicaRequest> requestWithAID, TransportChannel channel, Task task)
+            throws Exception {
+            new AsyncReplicaAction(requestWithAID.request, requestWithAID.targetAllocationID, channel, (ReplicationTask) task).run();
        }
    }
@@ -414,6 +433,8 @@ public abstract class TransportReplicationAction<
    private final class AsyncReplicaAction extends AbstractRunnable implements ActionListener<Releasable> {
        private final ReplicaRequest request;
+        // allocation id of the replica this request is meant for
+        private final String targetAllocationID;
        private final TransportChannel channel;
        /**
         * The task on the node with the replica shard.
@@ -423,10 +444,11 @@ public abstract class TransportReplicationAction<
        // something we want to avoid at all costs
        private final ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext());

-        AsyncReplicaAction(ReplicaRequest request, TransportChannel channel, ReplicationTask task) {
+        AsyncReplicaAction(ReplicaRequest request, String targetAllocationID, TransportChannel channel, ReplicationTask task) {
            this.request = request;
            this.channel = channel;
            this.task = task;
+            this.targetAllocationID = targetAllocationID;
        }

        @Override
@@ -444,7 +466,13 @@ public abstract class TransportReplicationAction<
        @Override
        public void onFailure(Exception e) {
            if (e instanceof RetryOnReplicaException) {
-                logger.trace("Retrying operation on replica, action [{}], request [{}]", e, transportReplicaAction, request);
+                logger.trace(
+                    (org.apache.logging.log4j.util.Supplier<?>)
+                        () -> new ParameterizedMessage(
+                            "Retrying operation on replica, action [{}], request [{}]",
+                            transportReplicaAction,
+                            request),
+                    e);
                final ThreadContext.StoredContext context = threadPool.getThreadContext().newStoredContext();
                observer.waitForNextChange(new ClusterStateObserver.Listener() {
                    @Override
@@ -455,7 +483,9 @@ public abstract class TransportReplicationAction<
                        String extraMessage = "action [" + transportReplicaAction + "], request[" + request + "]";
                        TransportChannelResponseHandler<TransportResponse.Empty> handler =
                            new TransportChannelResponseHandler<>(logger, channel, extraMessage, () -> TransportResponse.Empty.INSTANCE);
-                        transportService.sendRequest(clusterService.localNode(), transportReplicaAction, request, handler);
+                        transportService.sendRequest(clusterService.localNode(), transportReplicaAction,
+                            new ConcreteShardRequest<>(request, targetAllocationID),
+                            handler);
                    }

                    @Override
@@ -479,7 +509,12 @@ public abstract class TransportReplicationAction<
                channel.sendResponse(e);
            } catch (IOException responseException) {
                responseException.addSuppressed(e);
-                logger.warn("failed to send error message back to client for action [{}]", responseException, transportReplicaAction);
+                logger.warn(
+                    (org.apache.logging.log4j.util.Supplier<?>)
+                        () -> new ParameterizedMessage(
+                            "failed to send error message back to client for action [{}]",
+                            transportReplicaAction),
+                    responseException);
            }
        }
@@ -487,7 +522,7 @@ public abstract class TransportReplicationAction<
        protected void doRun() throws Exception {
            setPhase(task, "replica");
            assert request.shardId() != null : "request shardId must be set";
-            acquireReplicaOperationLock(request.shardId(), request.primaryTerm(), this);
+            acquireReplicaOperationLock(request.shardId(), request.primaryTerm(), targetAllocationID, this);
        }

        /**
@@ -584,7 +619,7 @@ public abstract class TransportReplicationAction<
                logger.trace("send action [{}] on primary [{}] for request [{}] with cluster state version [{}] to [{}] ",
                    transportPrimaryAction, request.shardId(), request, state.version(), primary.currentNodeId());
            }
-            performAction(node, transportPrimaryAction, true);
+            performAction(node, transportPrimaryAction, true, new ConcreteShardRequest<>(request, primary.allocationId().getId()));
        }

        private void performRemoteAction(ClusterState state, ShardRouting primary, DiscoveryNode node) {
@@ -606,7 +641,7 @@ public abstract class TransportReplicationAction<
                    request.shardId(), request, state.version(), primary.currentNodeId());
            }
            setPhase(task, "rerouted");
-            performAction(node, actionName, false);
+            performAction(node, actionName, false, request);
        }

        private boolean retryIfUnavailable(ClusterState state, ShardRouting primary) {
@@ -657,8 +692,9 @@ public abstract class TransportReplicationAction<
            }
        }

-        private void performAction(final DiscoveryNode node, final String action, final boolean isPrimaryAction) {
-            transportService.sendRequest(node, action, request, transportOptions, new TransportResponseHandler<Response>() {
+        private void performAction(final DiscoveryNode node, final String action, final boolean isPrimaryAction,
+                                   final TransportRequest requestToPerform) {
+            transportService.sendRequest(node, action, requestToPerform, transportOptions, new TransportResponseHandler<Response>() {

                @Override
                public Response newInstance() {
@@ -682,8 +718,12 @@ public abstract class TransportReplicationAction<
                final Throwable cause = exp.unwrapCause();
                if (cause instanceof ConnectTransportException || cause instanceof NodeClosedException ||
                    (isPrimaryAction && retryPrimaryException(cause))) {
-                    logger.trace("received an error from node [{}] for request [{}], scheduling a retry", exp, node.getId(),
-                        request);
+                    logger.trace(
+                        (org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
+                            "received an error from node [{}] for request [{}], scheduling a retry",
+                            node.getId(),
+                            requestToPerform),
+                        exp);
                    retry(exp);
                } else {
                    finishAsFailed(exp);
@@ -704,6 +744,7 @@ public abstract class TransportReplicationAction<
                return;
            }
            setPhase(task, "waiting_for_retry");
+            request.onRetry();
            final ThreadContext.StoredContext context = threadPool.getThreadContext().newStoredContext();
            observer.waitForNextChange(new ClusterStateObserver.Listener() {
                @Override
@@ -729,7 +770,9 @@ public abstract class TransportReplicationAction<
        void finishAsFailed(Exception failure) {
            if (finished.compareAndSet(false, true)) {
                setPhase(task, "failed");
-                logger.trace("operation failed. action [{}], request [{}]", failure, actionName, request);
+                logger.trace(
+                    (org.apache.logging.log4j.util.Supplier<?>)
+                        () -> new ParameterizedMessage("operation failed. action [{}], request [{}]", actionName, request), failure);
                listener.onFailure(failure);
            } else {
                assert false : "finishAsFailed called but operation is already finished";
@@ -737,7 +780,13 @@ public abstract class TransportReplicationAction<
        }

        void finishWithUnexpectedFailure(Exception failure) {
-            logger.warn("unexpected error during the primary phase for action [{}], request [{}]", failure, actionName, request);
+            logger.warn(
+                (org.apache.logging.log4j.util.Supplier<?>)
+                    () -> new ParameterizedMessage(
+                        "unexpected error during the primary phase for action [{}], request [{}]",
+                        actionName,
+                        request),
+                failure);
            if (finished.compareAndSet(false, true)) {
                setPhase(task, "failed");
                listener.onFailure(failure);
@@ -767,7 +816,8 @@ public abstract class TransportReplicationAction<
     * tries to acquire reference to {@link IndexShard} to perform a primary operation. Released after performing primary operation locally
     * and replication of the operation to all replica shards is completed / failed (see {@link ReplicationOperation}).
     */
-    protected void acquirePrimaryShardReference(ShardId shardId, ActionListener<PrimaryShardReference> onReferenceAcquired) {
+    protected void acquirePrimaryShardReference(ShardId shardId, String allocationId,
+                                                ActionListener<PrimaryShardReference> onReferenceAcquired) {
        IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
        IndexShard indexShard = indexService.getShard(shardId.id());
        // we may end up here if the cluster state used to route the primary is so stale that the underlying
@@ -777,6 +827,10 @@ public abstract class TransportReplicationAction<
            throw new ReplicationOperation.RetryOnPrimaryException(indexShard.shardId(),
                "actual shard is not a primary " + indexShard.routingEntry());
        }
+        final String actualAllocationId = indexShard.routingEntry().allocationId().getId();
+        if (actualAllocationId.equals(allocationId) == false) {
+            throw new ShardNotFoundException(shardId, "expected aID [{}] but found [{}]", allocationId, actualAllocationId);
+        }
        ActionListener<Releasable> onAcquired = new ActionListener<Releasable>() {
            @Override
@@ -796,9 +850,14 @@ public abstract class TransportReplicationAction<
    /**
     * tries to acquire an operation on replicas. The lock is closed as soon as replication is completed on the node.
     */
-    protected void acquireReplicaOperationLock(ShardId shardId, long primaryTerm, ActionListener<Releasable> onLockAcquired) {
+    protected void acquireReplicaOperationLock(ShardId shardId, long primaryTerm, final String allocationId,
+                                               ActionListener<Releasable> onLockAcquired) {
        IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
        IndexShard indexShard = indexService.getShard(shardId.id());
+        final String actualAllocationId = indexShard.routingEntry().allocationId().getId();
+        if (actualAllocationId.equals(allocationId) == false) {
+            throw new ShardNotFoundException(shardId, "expected aID [{}] but found [{}]", allocationId, actualAllocationId);
+        }
        indexShard.acquireReplicaOperationLock(primaryTerm, onLockAcquired, executor);
    }
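
Both acquire methods now refuse to operate on a shard copy other than the exact one the sender addressed; a matching shard id alone is no longer enough. A rough sketch of the guard, with ShardCopy and StaleCopyException as made-up stand-ins for IndexShard and ShardNotFoundException:

// Hypothetical types standing in for IndexShard / ShardNotFoundException.
final class StaleCopyException extends RuntimeException {
    StaleCopyException(String message) {
        super(message);
    }
}

final class ShardCopy {
    private final String allocationId;

    ShardCopy(String allocationId) {
        this.allocationId = allocationId;
    }

    /** Rejects requests addressed to a different (e.g. older, reallocated) copy. */
    void ensureExpectedCopy(String expectedAllocationId) {
        if (allocationId.equals(expectedAllocationId) == false) {
            throw new StaleCopyException(
                "expected aID [" + expectedAllocationId + "] but found [" + allocationId + "]");
        }
    }
}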
@@ -861,7 +920,8 @@ public abstract class TransportReplicationAction<
                listener.onFailure(new NoNodeAvailableException("unknown node [" + nodeId + "]"));
                return;
            }
-            transportService.sendRequest(node, transportReplicaAction, request, transportOptions,
+            transportService.sendRequest(node, transportReplicaAction,
+                new ConcreteShardRequest<>(request, replica.allocationId().getId()), transportOptions,
                new ActionListenerResponseHandler<>(listener, () -> TransportResponse.Empty.INSTANCE));
        }
@@ -903,6 +963,72 @@ public abstract class TransportReplicationAction<
        }
    }

+    /** a wrapper class to encapsulate a request when being sent to a specific allocation id **/
+    public static final class ConcreteShardRequest<R extends TransportRequest> extends TransportRequest {
+
+        /** {@link AllocationId#getId()} of the shard this request is sent to **/
+        private String targetAllocationID;
+
+        private R request;
+
+        ConcreteShardRequest(Supplier<R> requestSupplier) {
+            request = requestSupplier.get();
+            // null now, but will be populated by reading from the streams
+            targetAllocationID = null;
+        }
+
+        ConcreteShardRequest(R request, String targetAllocationID) {
+            Objects.requireNonNull(request);
+            Objects.requireNonNull(targetAllocationID);
+            this.request = request;
+            this.targetAllocationID = targetAllocationID;
+        }
+
+        @Override
+        public void setParentTask(String parentTaskNode, long parentTaskId) {
+            request.setParentTask(parentTaskNode, parentTaskId);
+        }
+
+        @Override
+        public void setParentTask(TaskId taskId) {
+            request.setParentTask(taskId);
+        }
+
+        @Override
+        public TaskId getParentTask() {
+            return request.getParentTask();
+        }
+
+        @Override
+        public Task createTask(long id, String type, String action, TaskId parentTaskId) {
+            return request.createTask(id, type, action, parentTaskId);
+        }
+
+        @Override
+        public String getDescription() {
+            return "[" + request.getDescription() + "] for aID [" + targetAllocationID + "]";
+        }
+
+        @Override
+        public void readFrom(StreamInput in) throws IOException {
+            targetAllocationID = in.readString();
+            request.readFrom(in);
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            out.writeString(targetAllocationID);
+            request.writeTo(out);
+        }
+
+        public R getRequest() {
+            return request;
+        }
+
+        public String getTargetAllocationID() {
+            return targetAllocationID;
+        }
+    }
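
ConcreteShardRequest is an envelope: it writes its own header (the target allocation id) first and then delegates to the wrapped request, and readFrom must mirror writeTo exactly. The same pattern sketched over plain java.io streams instead of Elasticsearch's StreamInput/StreamOutput:

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

// A payload that can (de)serialize itself, playing the role of TransportRequest.
interface Wireable {
    void readFrom(DataInput in) throws IOException;
    void writeTo(DataOutput out) throws IOException;
}

final class Envelope<R extends Wireable> {
    private String targetAllocationId;
    private final R payload;

    // receiving side: start from an empty payload and fill it from the stream
    Envelope(R emptyPayload) {
        this.payload = emptyPayload;
    }

    // sending side: wrap a concrete payload for a specific shard copy
    Envelope(R payload, String targetAllocationId) {
        this.payload = payload;
        this.targetAllocationId = targetAllocationId;
    }

    void readFrom(DataInput in) throws IOException {
        targetAllocationId = in.readUTF(); // header first ...
        payload.readFrom(in);              // ... then the wrapped payload
    }

    void writeTo(DataOutput out) throws IOException {
        out.writeUTF(targetAllocationId);
        payload.writeTo(out);
    }
}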
    /**
     * Sets the current phase on the task if it isn't null. Pulled into its own
     * method because it's more convenient that way.


@@ -19,6 +19,7 @@
package org.elasticsearch.action.support.replication;

+import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.WriteRequest;
@@ -27,7 +28,6 @@ import org.elasticsearch.cluster.action.shard.ShardStateAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.shard.IndexShard;
@@ -241,13 +241,13 @@ public abstract class TransportWriteAction<
        private final RespondingWriteResult respond;
        private final IndexShard indexShard;
        private final WriteRequest<?> request;
-        private final ESLogger logger;
+        private final Logger logger;

        AsyncAfterWriteAction(final IndexShard indexShard,
                              final WriteRequest<?> request,
                              @Nullable final Translog.Location location,
                              final RespondingWriteResult respond,
-                             final ESLogger logger) {
+                             final Logger logger) {
            this.indexShard = indexShard;
            this.request = request;
            boolean waitUntilRefresh = false;


@@ -19,6 +19,7 @@
package org.elasticsearch.action.support.single.shard;

+import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.NoShardAvailableActionException;
@@ -39,10 +40,10 @@ import org.elasticsearch.common.logging.LoggerMessageFormat;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequestHandler;
+import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;

import java.util.function.Supplier;
@@ -187,7 +188,9 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ
        private void onFailure(ShardRouting shardRouting, Exception e) {
            if (logger.isTraceEnabled() && e != null) {
-                logger.trace("{}: failed to execute [{}]", e, shardRouting, internalRequest.request());
+                logger.trace(
+                    (org.apache.logging.log4j.util.Supplier<?>)
+                        () -> new ParameterizedMessage("{}: failed to execute [{}]", shardRouting, internalRequest.request()), e);
            }
            perform(e);
        }
@@ -205,7 +208,9 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ
                failure = new NoShardAvailableActionException(null, LoggerMessageFormat.format("No shard available for [{}]", internalRequest.request()), failure);
            } else {
                if (logger.isDebugEnabled()) {
-                    logger.debug("{}: failed to execute [{}]", failure, null, internalRequest.request());
+                    logger.debug(
+                        (org.apache.logging.log4j.util.Supplier<?>)
+                            () -> new ParameterizedMessage("{}: failed to execute [{}]", null, internalRequest.request()), failure);
                }
            }
            listener.onFailure(failure);


@@ -19,6 +19,7 @@
package org.elasticsearch.action.support.tasks;

+import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException;
@@ -38,7 +39,6 @@ import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.NodeShouldNotConnectException;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;
@@ -46,6 +46,7 @@ import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponse;
+import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;
@@ -275,7 +276,9 @@ public abstract class TransportTasksAction<
        private void onFailure(int idx, String nodeId, Throwable t) {
            if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) {
-                logger.debug("failed to execute on node [{}]", t, nodeId);
+                logger.debug(
+                    (org.apache.logging.log4j.util.Supplier<?>)
+                        () -> new ParameterizedMessage("failed to execute on node [{}]", nodeId), t);
            }
            if (accumulateExceptions()) {
                responses.set(idx, new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t));
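
onFailure here records one slot per node, so successes and failures can later be correlated by index once every node has answered. A simplified sketch of that fan-out bookkeeping; the types are illustrative, not the real API:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReferenceArray;

// One slot per node: either a result or the failure that took its place.
final class FanOutCollector {
    private final AtomicReferenceArray<Object> responses;
    private final CountDownLatch done;

    FanOutCollector(int nodeCount) {
        this.responses = new AtomicReferenceArray<>(nodeCount);
        this.done = new CountDownLatch(nodeCount);
    }

    void onResponse(int idx, Object response) {
        responses.set(idx, response);
        done.countDown();
    }

    void onFailure(int idx, String nodeId, Throwable t) {
        responses.set(idx, new RuntimeException("Failed node [" + nodeId + "]", t));
        done.countDown();
    }
}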


@@ -19,6 +19,8 @@
package org.elasticsearch.action.termvectors;

+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.TransportActions;
@@ -87,7 +89,7 @@ public class TransportShardMultiTermsVectorAction extends TransportSingleShardAc
                if (TransportActions.isShardNotAvailableException(t)) {
                    throw (ElasticsearchException) t;
                } else {
-                    logger.debug("{} failed to execute multi term vectors for [{}]/[{}]", t, shardId, termVectorsRequest.type(), termVectorsRequest.id());
+                    logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute multi term vectors for [{}]/[{}]", shardId, termVectorsRequest.type(), termVectorsRequest.id()), t);
                    response.add(request.locations.get(i),
                        new MultiTermVectorsResponse.Failure(request.index(), termVectorsRequest.type(), termVectorsRequest.id(), t));
                }


@@ -19,6 +19,7 @@
package org.elasticsearch.bootstrap;

+import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.Constants;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.StringHelper;
@@ -28,7 +29,6 @@ import org.elasticsearch.cli.Terminal;
import org.elasticsearch.common.PidFile;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.inject.CreationException;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.LogConfigurator;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
@@ -81,7 +81,7 @@ final class Bootstrap {
    /** initialize native resources */
    public static void initializeNatives(Path tmpFile, boolean mlockAll, boolean seccomp, boolean ctrlHandler) {
-        final ESLogger logger = Loggers.getLogger(Bootstrap.class);
+        final Logger logger = Loggers.getLogger(Bootstrap.class);

        // check if the user is running as root, and bail
        if (Natives.definitelyRunningAsRoot()) {
@@ -227,7 +227,7 @@ final class Bootstrap {
            INSTANCE = new Bootstrap();

            Environment environment = initialEnvironment(foreground, pidFile, esSettings);
-            LogConfigurator.configure(environment.settings(), true);
+            LogConfigurator.configure(environment, true);
            checkForCustomConfFile();

            if (environment.pidFile() != null) {
@@ -264,7 +264,7 @@ final class Bootstrap {
            if (foreground) {
                Loggers.disableConsoleLogging();
            }
-            ESLogger logger = Loggers.getLogger(Bootstrap.class);
+            Logger logger = Loggers.getLogger(Bootstrap.class);
            if (INSTANCE.node != null) {
                logger = Loggers.getLogger(Bootstrap.class, Node.NODE_NAME_SETTING.get(INSTANCE.node.settings()));
            }
@@ -310,7 +310,7 @@ final class Bootstrap {
    private static void checkUnsetAndMaybeExit(String confFileSetting, String settingName) {
        if (confFileSetting != null && confFileSetting.isEmpty() == false) {
-            ESLogger logger = Loggers.getLogger(Bootstrap.class);
+            Logger logger = Loggers.getLogger(Bootstrap.class);
            logger.info("{} is no longer supported. elasticsearch.yml must be placed in the config directory and cannot be renamed.", settingName);
            exit(1);
        }


@@ -19,15 +19,16 @@
package org.elasticsearch.bootstrap;

+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.Constants;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
-import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.monitor.process.ProcessProbe;
import org.elasticsearch.node.Node;
@@ -100,7 +101,7 @@ final class BootstrapCheck {
            final boolean enforceLimits,
            final boolean ignoreSystemChecks,
            final List<Check> checks,
-            final ESLogger logger) {
+            final Logger logger) {
        final List<String> errors = new ArrayList<>();
        final List<String> ignoredErrors = new ArrayList<>();
@@ -136,7 +137,7 @@ final class BootstrapCheck {
    }

-    static void log(final ESLogger logger, final String error) {
+    static void log(final Logger logger, final String error) {
        logger.warn(error);
    }
@@ -417,7 +418,7 @@ final class BootstrapCheck {
        }

        // visible for testing
-        long getMaxMapCount(ESLogger logger) {
+        long getMaxMapCount(Logger logger) {
            final Path path = getProcSysVmMaxMapCountPath();
            try (final BufferedReader bufferedReader = getBufferedReader(path)) {
                final String rawProcSysVmMaxMapCount = readProcSysVmMaxMapCount(bufferedReader);
@@ -425,11 +426,15 @@ final class BootstrapCheck {
                    try {
                        return parseProcSysVmMaxMapCount(rawProcSysVmMaxMapCount);
                    } catch (final NumberFormatException e) {
-                        logger.warn("unable to parse vm.max_map_count [{}]", e, rawProcSysVmMaxMapCount);
+                        logger.warn(
+                            (Supplier<?>) () -> new ParameterizedMessage(
+                                "unable to parse vm.max_map_count [{}]",
+                                rawProcSysVmMaxMapCount),
+                            e);
                    }
                }
            } catch (final IOException e) {
-                logger.warn("I/O exception while trying to read [{}]", e, path);
+                logger.warn((Supplier<?>) () -> new ParameterizedMessage("I/O exception while trying to read [{}]", path), e);
            }
            return -1;
        }
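
getMaxMapCount reads one numeric line from /proc/sys/vm/max_map_count and maps every failure, whether an unreadable file or unparsable content, to the "unknown" marker -1. A standalone sketch of that defensive read:

import java.io.BufferedReader;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

final class MaxMapCount {
    /** Returns vm.max_map_count, or -1 if it cannot be read or parsed. */
    static long read() {
        final Path path = Paths.get("/proc/sys/vm/max_map_count");
        try (BufferedReader reader = Files.newBufferedReader(path)) {
            final String raw = reader.readLine();
            if (raw != null) {
                try {
                    return Long.parseLong(raw.trim());
                } catch (final NumberFormatException e) {
                    // unparsable content; fall through to the unknown marker
                }
            }
        } catch (final IOException e) {
            // file missing or unreadable (e.g. not on Linux)
        }
        return -1;
    }
}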


@@ -116,4 +116,5 @@ class Elasticsearch extends SettingCommand {
    static void close(String[] args) throws IOException {
        Bootstrap.stop();
    }
+
}


@@ -19,9 +19,10 @@
package org.elasticsearch.bootstrap;

+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.lucene.index.MergePolicy;
import org.elasticsearch.common.SuppressForbidden;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;

import java.io.IOError;
@@ -76,14 +77,17 @@ class ElasticsearchUncaughtExceptionHandler implements Thread.UncaughtExceptionH
    // visible for testing
    void onFatalUncaught(final String threadName, final Throwable t) {
-        final ESLogger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get());
-        logger.error("fatal error in thread [{}], exiting", t, threadName);
+        final Logger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get());
+        logger.error(
+            (org.apache.logging.log4j.util.Supplier<?>)
+                () -> new ParameterizedMessage("fatal error in thread [{}], exiting", threadName), t);
    }

    // visible for testing
    void onNonFatalUncaught(final String threadName, final Throwable t) {
-        final ESLogger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get());
-        logger.warn("uncaught exception in thread [{}]", t, threadName);
+        final Logger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get());
+        logger.warn((org.apache.logging.log4j.util.Supplier<?>)
+            () -> new ParameterizedMessage("uncaught exception in thread [{}]", threadName), t);
    }

    // visible for testing
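
The handler splits fatal from non-fatal errors; both paths now log through the lazy Log4j 2 overloads. A minimal sketch of installing such a handler, without the fatal/exit half:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

final class LoggingExceptionHandler implements Thread.UncaughtExceptionHandler {
    private static final Logger logger = LogManager.getLogger(LoggingExceptionHandler.class);

    @Override
    public void uncaughtException(Thread t, Throwable e) {
        final String threadName = t.getName();
        // lazy overload: message built only if WARN is enabled
        logger.warn(
            (Supplier<?>) () -> new ParameterizedMessage("uncaught exception in thread [{}]", threadName), e);
    }

    static void install() {
        Thread.setDefaultUncaughtExceptionHandler(new LoggingExceptionHandler());
    }
}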


@@ -22,8 +22,8 @@ package org.elasticsearch.bootstrap;
import com.sun.jna.Native;
import com.sun.jna.NativeLong;
import com.sun.jna.Structure;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.Constants;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;

import java.util.Arrays;
@@ -34,7 +34,7 @@ import java.util.List;
 */
final class JNACLibrary {

-    private static final ESLogger logger = Loggers.getLogger(JNACLibrary.class);
+    private static final Logger logger = Loggers.getLogger(JNACLibrary.class);

    public static final int MCL_CURRENT = 1;
    public static final int ENOMEM = 12;


@@ -25,8 +25,8 @@ import com.sun.jna.NativeLong;
import com.sun.jna.Pointer;
import com.sun.jna.Structure;
import com.sun.jna.win32.StdCallLibrary;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.Constants;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;

import java.util.ArrayList;
@@ -40,7 +40,7 @@ import java.util.List;
 */
final class JNAKernel32Library {

-    private static final ESLogger logger = Loggers.getLogger(JNAKernel32Library.class);
+    private static final Logger logger = Loggers.getLogger(JNAKernel32Library.class);

    // Callbacks must be kept around in order to be able to be called later,
    // when the Windows ConsoleCtrlHandler sends an event.


@@ -21,8 +21,8 @@ package org.elasticsearch.bootstrap;
import com.sun.jna.Native;
import com.sun.jna.Pointer;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.Constants;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.monitor.jvm.JvmInfo;

@@ -39,7 +39,7 @@ class JNANatives {
    /** no instantiation */
    private JNANatives() {}

-    private static final ESLogger logger = Loggers.getLogger(JNANatives.class);
+    private static final Logger logger = Loggers.getLogger(JNANatives.class);

    // Set to true, in case native mlockall call was successful
    static boolean LOCAL_MLOCKALL = false;


@@ -19,10 +19,10 @@
package org.elasticsearch.bootstrap;

+import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;

import java.io.IOException;
@@ -76,7 +76,7 @@ public class JarHell {
     */
    public static void checkJarHell() throws Exception {
        ClassLoader loader = JarHell.class.getClassLoader();
-        ESLogger logger = Loggers.getLogger(JarHell.class);
+        Logger logger = Loggers.getLogger(JarHell.class);
        if (logger.isDebugEnabled()) {
            logger.debug("java.class.path: {}", System.getProperty("java.class.path"));
            logger.debug("sun.boot.class.path: {}", System.getProperty("sun.boot.class.path"));
@@ -86,7 +86,7 @@ public class JarHell {
        }
        checkJarHell(parseClassPath());
    }

    /**
     * Parses the classpath into an array of URLs
     * @return array of URLs
@@ -150,7 +150,7 @@ public class JarHell {
     */
    @SuppressForbidden(reason = "needs JarFile for speed, just reading entries")
    public static void checkJarHell(URL urls[]) throws Exception {
-        ESLogger logger = Loggers.getLogger(JarHell.class);
+        Logger logger = Loggers.getLogger(JarHell.class);
        // we don't try to be sneaky and use deprecated/internal/not portable stuff
        // like sun.boot.class.path, and with jigsaw we don't yet have a way to get
        // a "list" at all. So just exclude any elements underneath the java home
@@ -168,7 +168,7 @@ public class JarHell {
            if (path.toString().endsWith(".jar")) {
                if (!seenJars.add(path)) {
                    logger.debug("excluding duplicate classpath element: {}", path);
-                    continue; // we can't fail because of sheistiness with joda-time
+                    continue;
                }
                logger.debug("examining jar: {}", path);
                try (JarFile file = new JarFile(path.toString())) {
@@ -271,11 +271,13 @@ public class JarHell {
                "class: " + clazz + System.lineSeparator() +
                "exists multiple times in jar: " + jarpath + " !!!!!!!!!");
        } else {
-            if (clazz.startsWith("org.apache.log4j")) {
-                return; // go figure, jar hell for what should be System.out.println...
-            }
-            if (clazz.equals("org.joda.time.base.BaseDateTime")) {
-                return; // apparently this is intentional... clean this up
+            if (clazz.startsWith("org.apache.logging.log4j.core.impl.ThrowableProxy")) {
+                /*
+                 * deliberate to hack around a bug in Log4j
+                 * cf. https://github.com/elastic/elasticsearch/issues/20304
+                 * cf. https://issues.apache.org/jira/browse/LOG4J2-1560
+                 */
+                return;
            }
            throw new IllegalStateException("jar hell!" + System.lineSeparator() +
                "class: " + clazz + System.lineSeparator() +

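The check records every class file it has seen across the classpath jars and fails on the first duplicate, with the Log4j ThrowableProxy now the single tolerated exception. A stripped-down sketch of the duplicate scan, with no exemptions and no directory handling:

import java.io.IOException;
import java.nio.file.Path;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.jar.JarEntry;
import java.util.jar.JarFile;

final class DuplicateClassScan {
    static void check(List<Path> jars) throws IOException {
        final Map<String, Path> seen = new HashMap<>();
        for (Path jar : jars) {
            try (JarFile file = new JarFile(jar.toFile())) {
                for (Enumeration<JarEntry> entries = file.entries(); entries.hasMoreElements();) {
                    final String name = entries.nextElement().getName();
                    if (name.endsWith(".class") == false) {
                        continue;
                    }
                    // put returns the previous mapping: a hit means a duplicate
                    final Path previous = seen.put(name, jar);
                    if (previous != null && previous.equals(jar) == false) {
                        throw new IllegalStateException(
                            "jar hell! class " + name + " in both " + previous + " and " + jar);
                    }
                }
            }
        }
    }
}
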

@@ -19,7 +19,7 @@
package org.elasticsearch.bootstrap;

-import org.elasticsearch.common.logging.ESLogger;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.Loggers;

import java.nio.file.Path;
@@ -32,7 +32,7 @@ final class Natives {
    /** no instantiation */
    private Natives() {}

-    private static final ESLogger logger = Loggers.getLogger(Natives.class);
+    private static final Logger logger = Loggers.getLogger(Natives.class);

    // marker to determine if the JNA class files are available to the JVM
    static final boolean JNA_AVAILABLE;


@@ -26,9 +26,9 @@ import com.sun.jna.NativeLong;
import com.sun.jna.Pointer;
import com.sun.jna.Structure;
import com.sun.jna.ptr.PointerByReference;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.Constants;
import org.apache.lucene.util.IOUtils;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;

import java.io.IOException;
@@ -92,7 +92,7 @@ import java.util.Map;
 */
// not an example of how to write code!!!
final class Seccomp {
-    private static final ESLogger logger = Loggers.getLogger(Seccomp.class);
+    private static final Logger logger = Loggers.getLogger(Seccomp.class);

    // Linux implementation, based on seccomp(2) or prctl(2) with bpf filtering


@@ -20,7 +20,8 @@
package org.elasticsearch.client.transport;

import com.carrotsearch.hppc.cursors.ObjectCursor;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
@@ -32,9 +33,8 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.client.Requests;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.common.component.AbstractComponent;
-import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.Randomness;
+import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
@@ -43,11 +43,11 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.FutureTransportResponseHandler;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequestOptions;
+import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;

import java.io.Closeable;
@@ -340,7 +340,7 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
                transportService.connectToNode(node);
            } catch (Exception e) {
                it.remove();
-                logger.debug("failed to connect to discovered node [{}]", e, node);
+                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to connect to discovered node [{}]", node), e);
            }
        }
    }
@@ -377,7 +377,9 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
                    logger.trace("connecting to listed node (light) [{}]", listedNode);
                    transportService.connectToNodeLight(listedNode);
                } catch (Exception e) {
-                    logger.debug("failed to connect to node [{}], removed from nodes list", e, listedNode);
+                    logger.debug(
+                        (Supplier<?>)
+                            () -> new ParameterizedMessage("failed to connect to node [{}], removed from nodes list", listedNode), e);
                    newFilteredNodes.add(listedNode);
                    continue;
                }
@@ -409,7 +411,8 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
                        newNodes.add(listedNode);
                    }
                } catch (Exception e) {
-                    logger.info("failed to get node info for {}, disconnecting...", e, listedNode);
+                    logger.info(
+                        (Supplier<?>) () -> new ParameterizedMessage("failed to get node info for {}, disconnecting...", listedNode), e);
                    transportService.disconnectFromNode(listedNode);
                }
            }
@@ -453,7 +456,9 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
                        transportService.connectToNodeLight(listedNode);
                    }
                } catch (Exception e) {
-                    logger.debug("failed to connect to node [{}], ignoring...", e, listedNode);
+                    logger.debug(
+                        (Supplier<?>)
+                            () -> new ParameterizedMessage("failed to connect to node [{}], ignoring...", listedNode), e);
                    latch.countDown();
                    return;
                }
@@ -482,13 +487,17 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
                        @Override
                        public void handleException(TransportException e) {
-                            logger.info("failed to get local cluster state for {}, disconnecting...", e, listedNode);
+                            logger.info(
+                                (Supplier<?>) () -> new ParameterizedMessage(
+                                    "failed to get local cluster state for {}, disconnecting...", listedNode), e);
                            transportService.disconnectFromNode(listedNode);
                            latch.countDown();
                        }
                    });
                } catch (Exception e) {
-                    logger.info("failed to get local cluster state info for {}, disconnecting...", e, listedNode);
+                    logger.info(
+                        (Supplier<?>) () -> new ParameterizedMessage(
+                            "failed to get local cluster state info for {}, disconnecting...", listedNode), e);
                    transportService.disconnectFromNode(listedNode);
                    latch.countDown();
                }
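
Every sampling path in this class follows one rule: try to talk to a listed node and, on any exception, log lazily, drop or skip the node, and continue, so one bad address never aborts the sweep. A condensed sketch of that loop; Transport here is an illustrative stand-in for TransportService:

import java.util.ArrayList;
import java.util.List;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

final class NodeSampler {
    private static final Logger logger = LogManager.getLogger(NodeSampler.class);

    // Hypothetical transport abstraction; the real code calls TransportService.
    interface Transport {
        void connect(String node) throws Exception;
    }

    /** Returns the nodes that answered; failures are logged and skipped. */
    static List<String> sample(Transport transport, List<String> listedNodes) {
        final List<String> live = new ArrayList<>();
        for (final String node : listedNodes) {
            try {
                transport.connect(node);
                live.add(node);
            } catch (Exception e) {
                logger.debug(
                    (Supplier<?>) () -> new ParameterizedMessage(
                        "failed to connect to node [{}], ignoring...", node), e);
            }
        }
        return live;
    }
}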


@@ -19,6 +19,7 @@
package org.elasticsearch.cluster;

+import org.apache.logging.log4j.Logger;
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
import org.elasticsearch.cluster.action.index.NodeMappingRefreshAction;
import org.elasticsearch.cluster.action.shard.ShardStateAction;
@@ -44,17 +45,16 @@ import org.elasticsearch.cluster.routing.allocation.decider.ConcurrentRebalanceA
import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.NodeVersionAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.RebalanceOnlyWhenActiveAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider;
-import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.AbstractModule;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
@@ -19,10 +19,10 @@
 package org.elasticsearch.cluster;
+import org.apache.logging.log4j.Logger;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
@@ -35,7 +35,7 @@ import java.util.concurrent.atomic.AtomicReference;
 */
 public class ClusterStateObserver {
-protected final ESLogger logger;
+protected final Logger logger;
 public final ChangePredicate MATCH_ALL_CHANGES_PREDICATE = new EventPredicate() {
@@ -58,7 +58,7 @@ public class ClusterStateObserver {
 volatile boolean timedOut;
-public ClusterStateObserver(ClusterService clusterService, ESLogger logger, ThreadContext contextHolder) {
+public ClusterStateObserver(ClusterService clusterService, Logger logger, ThreadContext contextHolder) {
 this(clusterService, new TimeValue(60000), logger, contextHolder);
 }
@@ -67,7 +67,7 @@ public class ClusterStateObserver {
 * will fail any existing or new #waitForNextChange calls. Set to null
 * to wait indefinitely
 */
-public ClusterStateObserver(ClusterService clusterService, @Nullable TimeValue timeout, ESLogger logger, ThreadContext contextHolder) {
+public ClusterStateObserver(ClusterService clusterService, @Nullable TimeValue timeout, Logger logger, ThreadContext contextHolder) {
 this.clusterService = clusterService;
 this.lastObservedState = new AtomicReference<>(new ObservedState(clusterService.state()));
 this.timeOutValue = timeout;
@@ -19,6 +19,7 @@
 package org.elasticsearch.cluster;
+import org.apache.logging.log4j.Logger;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.LatchedActionListener;
 import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
@@ -39,7 +40,6 @@ import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
@@ -379,7 +379,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
 return clusterInfo;
 }
-static void buildShardLevelInfo(ESLogger logger, ShardStats[] stats, ImmutableOpenMap.Builder<String, Long> newShardSizes,
+static void buildShardLevelInfo(Logger logger, ShardStats[] stats, ImmutableOpenMap.Builder<String, Long> newShardSizes,
 ImmutableOpenMap.Builder<ShardRouting, String> newShardRoutingToDataPath, ClusterState state) {
 MetaData meta = state.getMetaData();
 for (ShardStats s : stats) {
@@ -402,7 +402,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
 }
 }
-static void fillDiskUsagePerNode(ESLogger logger, List<NodeStats> nodeStatsArray,
+static void fillDiskUsagePerNode(Logger logger, List<NodeStats> nodeStatsArray,
 ImmutableOpenMap.Builder<String, DiskUsage> newLeastAvaiableUsages,
 ImmutableOpenMap.Builder<String, DiskUsage> newMostAvaiableUsages) {
 for (NodeStats nodeStats : nodeStatsArray) {
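
Note the design choice carried through these static helpers: they take the caller's Logger as a parameter rather than declaring their own, so output is attributed to the owning component's logger name. A tiny illustration of the same convention (names here are hypothetical):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class CallerLoggerSketch {
    // The helper logs under whatever logger the caller hands in, so one
    // utility can serve several components without hijacking their log names.
    static void summarize(Logger logger, int shardCount) {
        logger.trace("collected stats for {} shards", shardCount);
    }

    public static void main(String[] args) {
        Logger componentLogger = LogManager.getLogger("cluster.info");
        summarize(componentLogger, 42);
    }
}
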
@@ -18,6 +18,8 @@
 */
 package org.elasticsearch.cluster;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.component.AbstractLifecycleComponent;
 import org.elasticsearch.common.inject.Inject;
@@ -91,7 +93,7 @@ public class NodeConnectionsService extends AbstractLifecycleComponent {
 try {
 transportService.disconnectFromNode(node);
 } catch (Exception e) {
-logger.warn("failed to disconnect to node [{}]", e, node);
+logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to disconnect to node [{}]", node), e);
 }
 }
 }
@@ -113,7 +115,11 @@ public class NodeConnectionsService extends AbstractLifecycleComponent {
 nodeFailureCount = nodeFailureCount + 1;
 // log every 6th failure
 if ((nodeFailureCount % 6) == 1) {
-logger.warn("failed to connect to node {} (tried [{}] times)", e, node, nodeFailureCount);
+final int finalNodeFailureCount = nodeFailureCount;
+logger.warn(
+    (Supplier<?>)
+    () -> new ParameterizedMessage(
+        "failed to connect to node {} (tried [{}] times)", node, finalNodeFailureCount), e);
 }
 nodes.put(node, nodeFailureCount);
 }
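
The extra local finalNodeFailureCount exists because Java lambdas may only capture (effectively) final locals; nodeFailureCount is reassigned in the loop, so a snapshot is taken for the message supplier. A stripped-down illustration:

public class EffectivelyFinalSketch {
    public static void main(String[] args) {
        int nodeFailureCount = 0;
        nodeFailureCount = nodeFailureCount + 1; // reassigned, so no longer effectively final
        // Runnable r = () -> System.out.println(nodeFailureCount); // would not compile
        final int finalNodeFailureCount = nodeFailureCount;         // snapshot for the lambda
        Runnable r = () -> System.out.println("tried [" + finalNodeFailureCount + "] times");
        r.run();
    }
}
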
@@ -19,6 +19,9 @@
 package org.elasticsearch.cluster.action.shard;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.cluster.ClusterChangedEvent;
@@ -43,7 +46,6 @@ import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.discovery.Discovery;
@@ -108,7 +110,7 @@ public class ShardStateAction extends AbstractComponent {
 if (isMasterChannelException(exp)) {
 waitForNewMasterAndRetry(actionName, observer, shardEntry, listener);
 } else {
-logger.warn("{} unexpected failure while sending request [{}] to [{}] for shard entry [{}]", exp, shardEntry.shardId, actionName, masterNode, shardEntry);
+logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} unexpected failure while sending request [{}] to [{}] for shard entry [{}]", shardEntry.shardId, actionName, masterNode, shardEntry), exp);
 listener.onFailure(exp instanceof RemoteTransportException ? (Exception) (exp.getCause() instanceof Exception ? exp.getCause() : new ElasticsearchException(exp.getCause())) : exp);
 }
 }
@@ -169,7 +171,7 @@ public class ShardStateAction extends AbstractComponent {
 @Override
 public void onClusterServiceClose() {
-logger.warn("{} node closed while execution action [{}] for shard entry [{}]", shardEntry.failure, shardEntry.shardId, actionName, shardEntry);
+logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} node closed while execution action [{}] for shard entry [{}]", shardEntry.shardId, actionName, shardEntry), shardEntry.failure);
 listener.onFailure(new NodeClosedException(clusterService.localNode()));
 }
@@ -184,9 +186,9 @@ public class ShardStateAction extends AbstractComponent {
 private static class ShardFailedTransportHandler implements TransportRequestHandler<ShardEntry> {
 private final ClusterService clusterService;
 private final ShardFailedClusterStateTaskExecutor shardFailedClusterStateTaskExecutor;
-private final ESLogger logger;
+private final Logger logger;
-public ShardFailedTransportHandler(ClusterService clusterService, ShardFailedClusterStateTaskExecutor shardFailedClusterStateTaskExecutor, ESLogger logger) {
+public ShardFailedTransportHandler(ClusterService clusterService, ShardFailedClusterStateTaskExecutor shardFailedClusterStateTaskExecutor, Logger logger) {
 this.clusterService = clusterService;
 this.shardFailedClusterStateTaskExecutor = shardFailedClusterStateTaskExecutor;
 this.logger = logger;
@@ -194,7 +196,7 @@ public class ShardStateAction extends AbstractComponent {
 @Override
 public void messageReceived(ShardEntry request, TransportChannel channel) throws Exception {
-logger.warn("{} received shard failed for {}", request.failure, request.shardId, request);
+logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} received shard failed for {}", request.shardId, request), request.failure);
 clusterService.submitStateUpdateTask(
 "shard-failed",
 request,
@@ -203,12 +205,12 @@ public class ShardStateAction extends AbstractComponent {
 new ClusterStateTaskListener() {
 @Override
 public void onFailure(String source, Exception e) {
-logger.error("{} unexpected failure while failing shard [{}]", e, request.shardId, request);
+logger.error((Supplier<?>) () -> new ParameterizedMessage("{} unexpected failure while failing shard [{}]", request.shardId, request), e);
 try {
 channel.sendResponse(e);
 } catch (Exception channelException) {
 channelException.addSuppressed(e);
-logger.warn("{} failed to send failure [{}] while failing shard [{}]", channelException, request.shardId, e, request);
+logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} failed to send failure [{}] while failing shard [{}]", request.shardId, e, request), channelException);
 }
 }
@@ -218,7 +220,7 @@ public class ShardStateAction extends AbstractComponent {
 try {
 channel.sendResponse(new NotMasterException(source));
 } catch (Exception channelException) {
-logger.warn("{} failed to send no longer master while failing shard [{}]", channelException, request.shardId, request);
+logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} failed to send no longer master while failing shard [{}]", request.shardId, request), channelException);
 }
 }
@@ -227,7 +229,7 @@ public class ShardStateAction extends AbstractComponent {
 try {
 channel.sendResponse(TransportResponse.Empty.INSTANCE);
 } catch (Exception channelException) {
-logger.warn("{} failed to send response while failing shard [{}]", channelException, request.shardId, request);
+logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} failed to send response while failing shard [{}]", request.shardId, request), channelException);
 }
 }
 }
@@ -238,9 +240,9 @@ public class ShardStateAction extends AbstractComponent {
 public static class ShardFailedClusterStateTaskExecutor implements ClusterStateTaskExecutor<ShardEntry> {
 private final AllocationService allocationService;
 private final RoutingService routingService;
-private final ESLogger logger;
+private final Logger logger;
-public ShardFailedClusterStateTaskExecutor(AllocationService allocationService, RoutingService routingService, ESLogger logger) {
+public ShardFailedClusterStateTaskExecutor(AllocationService allocationService, RoutingService routingService, Logger logger) {
 this.allocationService = allocationService;
 this.routingService = routingService;
 this.logger = logger;
@@ -315,7 +317,7 @@ public class ShardStateAction extends AbstractComponent {
 }
 batchResultBuilder.successes(tasksToBeApplied);
 } catch (Exception e) {
-logger.warn("failed to apply failed shards {}", e, shardRoutingsToBeApplied);
+logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to apply failed shards {}", shardRoutingsToBeApplied), e);
 // failures are communicated back to the requester
 // cluster state will not be updated in this case
 batchResultBuilder.failures(tasksToBeApplied, e);
@@ -352,9 +354,9 @@ public class ShardStateAction extends AbstractComponent {
 private static class ShardStartedTransportHandler implements TransportRequestHandler<ShardEntry> {
 private final ClusterService clusterService;
 private final ShardStartedClusterStateTaskExecutor shardStartedClusterStateTaskExecutor;
-private final ESLogger logger;
+private final Logger logger;
-public ShardStartedTransportHandler(ClusterService clusterService, ShardStartedClusterStateTaskExecutor shardStartedClusterStateTaskExecutor, ESLogger logger) {
+public ShardStartedTransportHandler(ClusterService clusterService, ShardStartedClusterStateTaskExecutor shardStartedClusterStateTaskExecutor, Logger logger) {
 this.clusterService = clusterService;
 this.shardStartedClusterStateTaskExecutor = shardStartedClusterStateTaskExecutor;
 this.logger = logger;
@@ -375,9 +377,9 @@ public class ShardStateAction extends AbstractComponent {
 public static class ShardStartedClusterStateTaskExecutor implements ClusterStateTaskExecutor<ShardEntry>, ClusterStateTaskListener {
 private final AllocationService allocationService;
-private final ESLogger logger;
+private final Logger logger;
-public ShardStartedClusterStateTaskExecutor(AllocationService allocationService, ESLogger logger) {
+public ShardStartedClusterStateTaskExecutor(AllocationService allocationService, Logger logger) {
 this.allocationService = allocationService;
 this.logger = logger;
 }
@@ -431,7 +433,7 @@ public class ShardStateAction extends AbstractComponent {
 }
 builder.successes(tasksToBeApplied);
 } catch (Exception e) {
-logger.warn("failed to apply started shards {}", e, shardRoutingsToBeApplied);
+logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to apply started shards {}", shardRoutingsToBeApplied), e);
 builder.failures(tasksToBeApplied, e);
 }
@@ -440,7 +442,7 @@ public class ShardStateAction extends AbstractComponent {
 @Override
 public void onFailure(String source, Exception e) {
-logger.error("unexpected failure during [{}]", e, source);
+logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
 }
 }
@@ -22,6 +22,7 @@ package org.elasticsearch.cluster.metadata;
 import com.carrotsearch.hppc.ObjectHashSet;
 import com.carrotsearch.hppc.cursors.ObjectCursor;
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import org.apache.logging.log4j.Logger;
 import org.apache.lucene.util.CollectionUtil;
 import org.elasticsearch.cluster.Diff;
 import org.elasticsearch.cluster.Diffable;
@@ -38,7 +39,6 @@ import org.elasticsearch.common.collect.HppcMaps;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
@@ -758,7 +758,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
 /** As of 2.0 we require units for time and byte-sized settings. This method adds default units to any cluster settings that don't
 * specify a unit. */
-public static MetaData addDefaultUnitsIfNeeded(ESLogger logger, MetaData metaData) {
+public static MetaData addDefaultUnitsIfNeeded(Logger logger, MetaData metaData) {
 Settings.Builder newPersistentSettings = null;
 for(Map.Entry<String,String> ent : metaData.persistentSettings().getAsMap().entrySet()) {
 String settingName = ent.getKey();
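
The javadoc describes an upgrade shim: pre-2.0 cluster settings could be bare numbers, so on load a default unit is appended where one is missing. A toy version of the idea (the "ms" default here is an assumption for illustration, not read from this diff):

public class DefaultUnitsSketch {
    // Append a default unit to a bare numeric time setting; values that already
    // carry a unit (e.g. "30s") pass through untouched.
    static String addDefaultTimeUnitIfNeeded(String value) {
        return value.matches("[-+]?\\d+") ? value + "ms" : value;
    }

    public static void main(String[] args) {
        System.out.println(addDefaultTimeUnitIfNeeded("500")); // 500ms
        System.out.println(addDefaultTimeUnitIfNeeded("30s")); // 30s
    }
}
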
@@ -21,6 +21,8 @@ package org.elasticsearch.cluster.metadata;
 import com.carrotsearch.hppc.cursors.ObjectCursor;
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.util.CollectionUtil;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
@@ -446,9 +448,9 @@ public class MetaDataCreateIndexService extends AbstractComponent {
 @Override
 public void onFailure(String source, Exception e) {
 if (e instanceof IndexAlreadyExistsException) {
-logger.trace("[{}] failed to create", e, request.index());
+logger.trace((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to create", request.index()), e);
 } else {
-logger.debug("[{}] failed to create", e, request.index());
+logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to create", request.index()), e);
 }
 super.onFailure(source, e);
 }
@@ -20,7 +20,6 @@
 package org.elasticsearch.cluster.metadata;
 import com.carrotsearch.hppc.cursors.ObjectCursor;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.admin.indices.alias.IndicesAliasesClusterStateUpdateRequest;
@@ -64,7 +63,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
 private final AliasValidator aliasValidator;
 private final NodeServicesProvider nodeServicesProvider;
 private final MetaDataDeleteIndexService deleteIndexService;
 @Inject
@@ -20,6 +20,8 @@
 package org.elasticsearch.cluster.metadata;
 import com.carrotsearch.hppc.cursors.ObjectCursor;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest;
 import org.elasticsearch.cluster.AckedClusterStateTaskListener;
@@ -193,7 +195,7 @@ public class MetaDataMappingService extends AbstractComponent {
 }
 }
 } catch (Exception e) {
-logger.warn("[{}] failed to refresh-mapping in cluster state", e, index);
+logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to refresh-mapping in cluster state", index), e);
 }
 return dirty;
 }
@@ -207,7 +209,7 @@ public class MetaDataMappingService extends AbstractComponent {
 refreshTask,
 ClusterStateTaskConfig.build(Priority.HIGH),
 refreshExecutor,
-(source, e) -> logger.warn("failure during [{}]", e, source)
+(source, e) -> logger.warn((Supplier<?>) () -> new ParameterizedMessage("failure during [{}]", source), e)
 );
 }
@@ -21,6 +21,7 @@ package org.elasticsearch.cluster.routing;
 import com.carrotsearch.hppc.ObjectIntHashMap;
 import com.carrotsearch.hppc.cursors.ObjectCursor;
+import org.apache.logging.log4j.Logger;
 import org.apache.lucene.util.CollectionUtil;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
@@ -30,7 +31,6 @@ import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Randomness;
 import org.elasticsearch.common.collect.Tuple;
-import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.shard.ShardId;
@@ -452,7 +452,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
 *
 * @return the started shard
 */
-public ShardRouting startShard(ESLogger logger, ShardRouting initializingShard, RoutingChangesObserver routingChangesObserver) {
+public ShardRouting startShard(Logger logger, ShardRouting initializingShard, RoutingChangesObserver routingChangesObserver) {
 ensureMutable();
 ShardRouting startedShard = started(initializingShard);
 logger.trace("{} marked shard as started (routing: {})", initializingShard.shardId(), initializingShard);
@@ -484,7 +484,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
 * - If shard is a (primary or replica) relocation target, this also clears the relocation information on the source shard.
 *
 */
-public void failShard(ESLogger logger, ShardRouting failedShard, UnassignedInfo unassignedInfo, IndexMetaData indexMetaData,
+public void failShard(Logger logger, ShardRouting failedShard, UnassignedInfo unassignedInfo, IndexMetaData indexMetaData,
 RoutingChangesObserver routingChangesObserver) {
 ensureMutable();
 assert failedShard.assignedToNode() : "only assigned shards can be failed";
@@ -19,6 +19,8 @@
 package org.elasticsearch.cluster.routing;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.cluster.ClusterChangedEvent;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ClusterStateUpdateTask;
@@ -113,16 +115,16 @@ public class RoutingService extends AbstractLifecycleComponent {
 rerouting.set(false);
 ClusterState state = clusterService.state();
 if (logger.isTraceEnabled()) {
-logger.error("unexpected failure during [{}], current state:\n{}", e, source, state.prettyPrint());
+logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}], current state:\n{}", source, state.prettyPrint()), e);
 } else {
-logger.error("unexpected failure during [{}], current state version [{}]", e, source, state.version());
+logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}], current state version [{}]", source, state.version()), e);
 }
 }
 });
 } catch (Exception e) {
 rerouting.set(false);
 ClusterState state = clusterService.state();
-logger.warn("failed to reroute routing table, current state:\n{}", e, state.prettyPrint());
+logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to reroute routing table, current state:\n{}", state.prettyPrint()), e);
 }
 }
 }
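
Even with lazy suppliers, the isTraceEnabled() guard above still matters: the message is logged at ERROR either way, and the guard selects how much detail to include, since a full pretty-printed state is expensive to build and to retain in logs. In sketch form (generic names, not this class):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

public class PayloadChoiceSketch {
    private static final Logger logger = LogManager.getLogger(PayloadChoiceSketch.class);

    // Both branches log at ERROR; the TRACE check only decides the payload size.
    static void onFailure(String source, long version, String expensiveDump, Exception e) {
        if (logger.isTraceEnabled()) {
            logger.error((Supplier<?>) () -> new ParameterizedMessage(
                "unexpected failure during [{}], current state:\n{}", source, expensiveDump), e);
        } else {
            logger.error((Supplier<?>) () -> new ParameterizedMessage(
                "unexpected failure during [{}], current state version [{}]", source, version), e);
        }
    }
}
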
@@ -27,8 +27,8 @@ import org.elasticsearch.cluster.Diffable;
 import org.elasticsearch.cluster.DiffableUtils;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.MetaData;
-import org.elasticsearch.common.Nullable;
 import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
+import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -88,6 +88,11 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
 return indicesRouting.containsKey(index);
 }
+public boolean hasIndex(Index index) {
+    IndexRoutingTable indexRouting = index(index.getName());
+    return indexRouting != null && indexRouting.getIndex().equals(index);
+}
 public IndexRoutingTable index(String index) {
 return indicesRouting.get(index);
 }
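
The new hasIndex(Index) overload is stricter than the name-based check: the looked-up routing entry must also equal the full Index, which carries identity beyond the name, so a deleted-and-recreated index does not pass for its predecessor. A self-contained toy analog (plain Java, not the Elasticsearch types):

import java.util.HashMap;
import java.util.Map;

public class HasIndexSketch {
    // Stand-in for org.elasticsearch.index.Index: a name plus a per-incarnation id.
    static final class Index {
        final String name;
        final String uuid;
        Index(String name, String uuid) { this.name = name; this.uuid = uuid; }
        @Override public boolean equals(Object o) {
            return o instanceof Index && ((Index) o).name.equals(name) && ((Index) o).uuid.equals(uuid);
        }
        @Override public int hashCode() { return name.hashCode() * 31 + uuid.hashCode(); }
    }

    static final Map<String, Index> indicesRouting = new HashMap<>();

    static boolean hasIndex(Index index) {
        Index routing = indicesRouting.get(index.name);
        return routing != null && routing.equals(index);
    }

    public static void main(String[] args) {
        indicesRouting.put("logs", new Index("logs", "uuid-1"));
        System.out.println(hasIndex(new Index("logs", "uuid-1"))); // true
        System.out.println(hasIndex(new Index("logs", "uuid-2"))); // false: same name, new incarnation
    }
}
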
@@ -19,6 +19,7 @@
 package org.elasticsearch.cluster.routing.allocation.allocator;
+import org.apache.logging.log4j.Logger;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.IntroSorter;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
@@ -37,7 +38,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
@@ -209,7 +209,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
 * A {@link Balancer}
 */
 public static class Balancer {
-private final ESLogger logger;
+private final Logger logger;
 private final Map<String, ModelNode> nodes = new HashMap<>();
 private final RoutingAllocation allocation;
 private final RoutingNodes routingNodes;
@@ -219,7 +219,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
 private final MetaData metaData;
 private final float avgShardsPerNode;
-public Balancer(ESLogger logger, RoutingAllocation allocation, WeightFunction weight, float threshold) {
+public Balancer(Logger logger, RoutingAllocation allocation, WeightFunction weight, float threshold) {
 this.logger = logger;
 this.allocation = allocation;
 this.weight = weight;
@@ -78,11 +78,10 @@ public class DiskThresholdDecider extends AllocationDecider {
 * Returns the size of all shards that are currently being relocated to
 * the node, but may not be finished transferring yet.
 *
-* If subtractShardsMovingAway is set then the size of shards moving away is subtracted from the total size
-* of all shards
+* If subtractShardsMovingAway is true then the size of shards moving away is subtracted from the total size of all shards
 */
 static long sizeOfRelocatingShards(RoutingNode node, RoutingAllocation allocation,
 boolean subtractShardsMovingAway, String dataPath) {
 ClusterInfo clusterInfo = allocation.clusterInfo();
 long totalSize = 0;
 for (ShardRouting routing : node.shardsWithState(ShardRoutingState.RELOCATING, ShardRoutingState.INITIALIZING)) {
@@ -111,7 +110,9 @@ public class DiskThresholdDecider extends AllocationDecider {
 final double usedDiskThresholdLow = 100.0 - diskThresholdSettings.getFreeDiskThresholdLow();
 final double usedDiskThresholdHigh = 100.0 - diskThresholdSettings.getFreeDiskThresholdHigh();
-DiskUsage usage = getDiskUsage(node, allocation, usages);
+// subtractLeavingShards is passed as false here, because they still use disk space, and therefore we should be extra careful
+// and take the size into account
+DiskUsage usage = getDiskUsage(node, allocation, usages, false);
 // First, check that the node currently over the low watermark
 double freeDiskPercentage = usage.getFreeDiskAsPercentage();
 // Cache the used disk percentage for displaying disk percentages consistent with documentation
@@ -243,7 +244,9 @@ public class DiskThresholdDecider extends AllocationDecider {
 return decision;
 }
-final DiskUsage usage = getDiskUsage(node, allocation, usages);
+// subtractLeavingShards is passed as true here, since this is only for shards remaining, we will *eventually* have enough disk
+// since shards are moving away. No new shards will be incoming since in canAllocate we pass false for this check.
+final DiskUsage usage = getDiskUsage(node, allocation, usages, true);
 final String dataPath = clusterInfo.getDataPath(shardRouting);
 // If this node is already above the high threshold, the shard cannot remain (get it off!)
 final double freeDiskPercentage = usage.getFreeDiskAsPercentage();
@@ -280,7 +283,8 @@ public class DiskThresholdDecider extends AllocationDecider {
 "there is enough disk on this node for the shard to remain, free: [%s]", new ByteSizeValue(freeBytes));
 }
-private DiskUsage getDiskUsage(RoutingNode node, RoutingAllocation allocation, ImmutableOpenMap<String, DiskUsage> usages) {
+private DiskUsage getDiskUsage(RoutingNode node, RoutingAllocation allocation,
+                               ImmutableOpenMap<String, DiskUsage> usages, boolean subtractLeavingShards) {
 DiskUsage usage = usages.get(node.nodeId());
 if (usage == null) {
 // If there is no usage, and we have other nodes in the cluster,
@@ -293,7 +297,7 @@ public class DiskThresholdDecider extends AllocationDecider {
 }
 if (diskThresholdSettings.includeRelocations()) {
-long relocatingShardsSize = sizeOfRelocatingShards(node, allocation, true, usage.getPath());
+long relocatingShardsSize = sizeOfRelocatingShards(node, allocation, subtractLeavingShards, usage.getPath());
 DiskUsage usageIncludingRelocations = new DiskUsage(node.nodeId(), node.node().getName(), usage.getPath(),
 usage.getTotalBytes(), usage.getFreeBytes() - relocatingShardsSize);
 if (logger.isTraceEnabled()) {
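
The asymmetry between the two call sites comes down to which relocations count against free space. A toy calculation with assumed numbers (not Elasticsearch API) showing how the same node looks to canAllocate versus canRemain:

public class RelocationAccountingSketch {
    public static void main(String[] args) {
        long freeBytes = 12_000_000L;  // reported free space on the node
        long incoming  = 5_000_000L;   // shards currently relocating TO the node
        long leaving   = 4_000_000L;   // shards currently relocating AWAY

        // canAllocate (subtractLeavingShards = false): leaving shards still occupy
        // disk right now, so only incoming transfers reduce the budget.
        long freeForAllocate = freeBytes - incoming;

        // canRemain (subtractLeavingShards = true): shards on their way out will
        // release their space, so their size is credited back.
        long freeForRemain = freeBytes - (incoming - leaving);

        System.out.println("free seen by canAllocate: " + freeForAllocate); // 7000000
        System.out.println("free seen by canRemain:   " + freeForRemain);   // 11000000
    }
}
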
@@ -19,6 +19,9 @@
 package org.elasticsearch.cluster.service;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.cluster.AckedClusterStateTaskListener;
 import org.elasticsearch.cluster.ClusterChangedEvent;
 import org.elasticsearch.cluster.ClusterName;
@@ -43,7 +46,6 @@ import org.elasticsearch.cluster.routing.RoutingTable;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.component.AbstractLifecycleComponent;
-import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Setting;
@@ -554,9 +556,16 @@ public class ClusterService extends AbstractLifecycleComponent {
 } catch (Exception e) {
 TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
 if (logger.isTraceEnabled()) {
-logger.trace("failed to execute cluster state update in [{}], state:\nversion [{}], source [{}]\n{}{}{}", e, executionTime,
-previousClusterState.version(), tasksSummary, previousClusterState.nodes().prettyPrint(),
-previousClusterState.routingTable().prettyPrint(), previousClusterState.getRoutingNodes().prettyPrint());
+logger.trace(
+    (Supplier<?>) () -> new ParameterizedMessage(
+        "failed to execute cluster state update in [{}], state:\nversion [{}], source [{}]\n{}{}{}",
+        executionTime,
+        previousClusterState.version(),
+        tasksSummary,
+        previousClusterState.nodes().prettyPrint(),
+        previousClusterState.routingTable().prettyPrint(),
+        previousClusterState.getRoutingNodes().prettyPrint()),
+    e);
 }
 warnAboutSlowTaskIfNeeded(executionTime, tasksSummary);
 batchResult = ClusterStateTaskExecutor.BatchResult.<T>builder()
@@ -587,7 +596,9 @@ public class ClusterService extends AbstractLifecycleComponent {
 executionResult.handle(
 () -> proccessedListeners.add(updateTask),
 ex -> {
-logger.debug("cluster state update task {} failed", ex, updateTask.toString(executor));
+logger.debug(
+    (Supplier<?>)
+    () -> new ParameterizedMessage("cluster state update task {} failed", updateTask.toString(executor)), ex);
 updateTask.listener.onFailure(updateTask.source, ex);
 }
 );
@@ -670,7 +681,11 @@ public class ClusterService extends AbstractLifecycleComponent {
 try {
 clusterStatePublisher.accept(clusterChangedEvent, ackListener);
 } catch (Discovery.FailedToCommitClusterStateException t) {
-logger.warn("failing [{}]: failed to commit cluster state version [{}]", t, tasksSummary, newClusterState.version());
+final long version = newClusterState.version();
+logger.warn(
+    (Supplier<?>) () -> new ParameterizedMessage(
+        "failing [{}]: failed to commit cluster state version [{}]", tasksSummary, version),
+    t);
 proccessedListeners.forEach(task -> task.listener.onFailure(task.source, t));
 return;
 }
@@ -713,7 +728,10 @@ public class ClusterService extends AbstractLifecycleComponent {
 try {
 ackListener.onNodeAck(newClusterState.nodes().getLocalNode(), null);
 } catch (Exception e) {
-logger.debug("error while processing ack for master node [{}]", e, newClusterState.nodes().getLocalNode());
+final DiscoveryNode localNode = newClusterState.nodes().getLocalNode();
+logger.debug(
+    (Supplier<?>) () -> new ParameterizedMessage("error while processing ack for master node [{}]", localNode),
+    e);
 }
 }
@@ -724,7 +742,11 @@ public class ClusterService extends AbstractLifecycleComponent {
 try {
 executor.clusterStatePublished(clusterChangedEvent);
 } catch (Exception e) {
-logger.error("exception thrown while notifying executor of new cluster state publication [{}]", e, tasksSummary);
+logger.error(
+    (Supplier<?>) () -> new ParameterizedMessage(
+        "exception thrown while notifying executor of new cluster state publication [{}]",
+        tasksSummary),
+    e);
 }
 TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
@@ -733,8 +755,18 @@ public class ClusterService extends AbstractLifecycleComponent {
 warnAboutSlowTaskIfNeeded(executionTime, tasksSummary);
 } catch (Exception e) {
 TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
-logger.warn("failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}", e, executionTime,
-newClusterState.version(), newClusterState.stateUUID(), tasksSummary, newClusterState.prettyPrint());
+final long version = newClusterState.version();
+final String stateUUID = newClusterState.stateUUID();
+final String prettyPrint = newClusterState.prettyPrint();
+logger.warn(
+    (Supplier<?>) () -> new ParameterizedMessage(
+        "failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}",
+        executionTime,
+        version,
+        stateUUID,
+        tasksSummary,
+        prettyPrint),
+    e);
 // TODO: do we want to call updateTask.onFailure here?
 }
@@ -743,7 +775,7 @@ public class ClusterService extends AbstractLifecycleComponent {
 // this one is overridden in tests so we can control time
 protected long currentTimeInNanos() {return System.nanoTime();}
-private static SafeClusterStateTaskListener safe(ClusterStateTaskListener listener, ESLogger logger) {
+private static SafeClusterStateTaskListener safe(ClusterStateTaskListener listener, Logger logger) {
 if (listener instanceof AckedClusterStateTaskListener) {
 return new SafeAckedClusterStateTaskListener((AckedClusterStateTaskListener) listener, logger);
 } else {
@@ -753,9 +785,9 @@ public class ClusterService extends AbstractLifecycleComponent {
 private static class SafeClusterStateTaskListener implements ClusterStateTaskListener {
 private final ClusterStateTaskListener listener;
-private final ESLogger logger;
+private final Logger logger;
-public SafeClusterStateTaskListener(ClusterStateTaskListener listener, ESLogger logger) {
+public SafeClusterStateTaskListener(ClusterStateTaskListener listener, Logger logger) {
 this.listener = listener;
 this.logger = logger;
 }
@@ -766,7 +798,9 @@ public class ClusterService extends AbstractLifecycleComponent {
 listener.onFailure(source, e);
 } catch (Exception inner) {
 inner.addSuppressed(e);
-logger.error("exception thrown by listener notifying of failure from [{}]", inner, source);
+logger.error(
+    (Supplier<?>) () -> new ParameterizedMessage(
+        "exception thrown by listener notifying of failure from [{}]", source), inner);
 }
 }
@@ -775,7 +809,9 @@ public class ClusterService extends AbstractLifecycleComponent {
 try {
 listener.onNoLongerMaster(source);
 } catch (Exception e) {
-logger.error("exception thrown by listener while notifying no longer master from [{}]", e, source);
+logger.error(
+    (Supplier<?>) () -> new ParameterizedMessage(
+        "exception thrown by listener while notifying no longer master from [{}]", source), e);
 }
 }
@@ -785,21 +821,22 @@ public class ClusterService extends AbstractLifecycleComponent {
 listener.clusterStateProcessed(source, oldState, newState);
 } catch (Exception e) {
 logger.error(
+    (Supplier<?>) () -> new ParameterizedMessage(
 "exception thrown by listener while notifying of cluster state processed from [{}], old cluster state:\n" +
 "{}\nnew cluster state:\n{}",
-e,
 source,
 oldState.prettyPrint(),
-newState.prettyPrint());
+    newState.prettyPrint()),
+    e);
 }
 }
 }
 private static class SafeAckedClusterStateTaskListener extends SafeClusterStateTaskListener implements AckedClusterStateTaskListener {
 private final AckedClusterStateTaskListener listener;
-private final ESLogger logger;
+private final Logger logger;
-public SafeAckedClusterStateTaskListener(AckedClusterStateTaskListener listener, ESLogger logger) {
+public SafeAckedClusterStateTaskListener(AckedClusterStateTaskListener listener, Logger logger) {
 super(listener, logger);
 this.listener = listener;
 this.logger = logger;
@@ -996,7 +1033,7 @@ public class ClusterService extends AbstractLifecycleComponent {
 private static class AckCountDownListener implements Discovery.AckListener {
-private static final ESLogger logger = Loggers.getLogger(AckCountDownListener.class);
+private static final Logger logger = Loggers.getLogger(AckCountDownListener.class);
 private final AckedClusterStateTaskListener ackedTaskListener;
 private final CountDown countDown;
@@ -1040,7 +1077,10 @@ public class ClusterService extends AbstractLifecycleComponent {
 logger.trace("ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion);
 } else {
 this.lastFailure = e;
-logger.debug("ack received from node [{}], cluster_state update (version: {})", e, node, clusterStateVersion);
+logger.debug(
+    (Supplier<?>) () -> new ParameterizedMessage(
+        "ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion),
+    e);
 }
 if (countDown.countDown()) {
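
SafeClusterStateTaskListener captures a defensive idiom worth naming: never let a misbehaving callback propagate out of the notifying thread; catch its exception, chain the original failure as suppressed, and log. A compact sketch of the wrapper (interface and names simplified from the class above):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

public class SafeListenerSketch {
    interface TaskListener {
        void onFailure(String source, Exception e);
    }

    private static final Logger logger = LogManager.getLogger(SafeListenerSketch.class);

    // Wrap a listener so an exception thrown by the callback is logged (with the
    // original failure attached as suppressed) instead of escaping the caller.
    static TaskListener safe(TaskListener inner) {
        return (source, e) -> {
            try {
                inner.onFailure(source, e);
            } catch (Exception x) {
                x.addSuppressed(e);
                logger.error((Supplier<?>) () -> new ParameterizedMessage(
                    "exception thrown by listener notifying of failure from [{}]", source), x);
            }
        };
    }

    public static void main(String[] args) {
        TaskListener flaky = (source, e) -> { throw new IllegalStateException("listener bug"); };
        safe(flaky).onFailure("test-task", new RuntimeException("original failure")); // does not throw
    }
}
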
@@ -16,13 +16,14 @@
 * specific language governing permissions and limitations
 * under the License.
 */
-package org.elasticsearch.common;
+
+package org.elasticsearch.common;
 import java.lang.annotation.ElementType;
 import java.lang.annotation.Retention;
 import java.lang.annotation.RetentionPolicy;
 import java.lang.annotation.Target;
 /**
 * Annotation to suppress logging usage checks errors inside a whole class or a method.
 */
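
The file above declares the marker consumed by the build's logging-usage checker. The general shape of such an annotation looks like this (the retention, targets, and reason element here are assumptions for illustration, not read from the diff):

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

// A marker a bytecode-level checker can see (CLASS retention) on types or
// methods whose logging calls should be exempt from the usage rules.
@Retention(RetentionPolicy.CLASS)
@Target({ElementType.TYPE, ElementType.METHOD})
@interface SuppressLoggerChecksSketch {
    String reason();
}
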
@@ -19,7 +19,7 @@
 package org.elasticsearch.common.breaker;
-import org.elasticsearch.common.logging.ESLogger;
+import org.apache.logging.log4j.Logger;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.indices.breaker.BreakerSettings;
 import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
@@ -36,7 +36,7 @@ public class ChildMemoryCircuitBreaker implements CircuitBreaker {
 private final double overheadConstant;
 private final AtomicLong used;
 private final AtomicLong trippedCount;
-private final ESLogger logger;
+private final Logger logger;
 private final HierarchyCircuitBreakerService parent;
 private final String name;
@@ -48,7 +48,7 @@ public class ChildMemoryCircuitBreaker implements CircuitBreaker {
 * @param parent parent circuit breaker service to delegate tripped breakers to
 * @param name the name of the breaker
 */
-public ChildMemoryCircuitBreaker(BreakerSettings settings, ESLogger logger,
+public ChildMemoryCircuitBreaker(BreakerSettings settings, Logger logger,
 HierarchyCircuitBreakerService parent, String name) {
 this(settings, null, logger, parent, name);
 }
@@ -64,7 +64,7 @@ public class ChildMemoryCircuitBreaker implements CircuitBreaker {
 * @param oldBreaker the previous circuit breaker to inherit the used value from (starting offset)
 */
 public ChildMemoryCircuitBreaker(BreakerSettings settings, ChildMemoryCircuitBreaker oldBreaker,
-ESLogger logger, HierarchyCircuitBreakerService parent, String name) {
+Logger logger, HierarchyCircuitBreakerService parent, String name) {
 this.name = name;
 this.settings = settings;
 this.memoryBytesLimit = settings.getLimit();
@@ -18,7 +18,7 @@
 */
 package org.elasticsearch.common.breaker;
-import org.elasticsearch.common.logging.ESLogger;
+import org.apache.logging.log4j.Logger;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import java.util.concurrent.atomic.AtomicLong;
@@ -33,7 +33,7 @@ public class MemoryCircuitBreaker implements CircuitBreaker {
 private final double overheadConstant;
 private final AtomicLong used;
 private final AtomicLong trippedCount;
-private final ESLogger logger;
+private final Logger logger;
 /**
@@ -43,7 +43,7 @@ public class MemoryCircuitBreaker implements CircuitBreaker {
 * @param limit circuit breaker limit
 * @param overheadConstant constant multiplier for byte estimations
 */
-public MemoryCircuitBreaker(ByteSizeValue limit, double overheadConstant, ESLogger logger) {
+public MemoryCircuitBreaker(ByteSizeValue limit, double overheadConstant, Logger logger) {
 this(limit, overheadConstant, null, logger);
 }
@@ -56,7 +56,7 @@ public class MemoryCircuitBreaker implements CircuitBreaker {
 * @param overheadConstant constant multiplier for byte estimations
 * @param oldBreaker the previous circuit breaker to inherit the used value from (starting offset)
 */
-public MemoryCircuitBreaker(ByteSizeValue limit, double overheadConstant, MemoryCircuitBreaker oldBreaker, ESLogger logger) {
+public MemoryCircuitBreaker(ByteSizeValue limit, double overheadConstant, MemoryCircuitBreaker oldBreaker, Logger logger) {
 this.memoryBytesLimit = limit.bytes();
 this.overheadConstant = overheadConstant;
 if (oldBreaker == null) {
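
The fields visible here (a byte limit, an overhead multiplier, and atomic used/tripped counters) outline the breaker's accounting. A toy breaker in that spirit (not the Elasticsearch implementation; the tripping policy and rollback are assumptions):

import java.util.concurrent.atomic.AtomicLong;

public class ToyMemoryBreaker {
    private final long memoryBytesLimit;
    private final double overheadConstant;   // multiplier applied to raw estimates
    private final AtomicLong used = new AtomicLong();
    private final AtomicLong trippedCount = new AtomicLong();

    public ToyMemoryBreaker(long memoryBytesLimit, double overheadConstant) {
        this.memoryBytesLimit = memoryBytesLimit;
        this.overheadConstant = overheadConstant;
    }

    // Reserve bytes, tripping (and rolling the reservation back) when the
    // overhead-adjusted total would exceed the limit.
    public long addEstimateBytesAndMaybeBreak(long bytes) {
        long newUsed = used.addAndGet(bytes);
        if (newUsed * overheadConstant > memoryBytesLimit) {
            used.addAndGet(-bytes);
            trippedCount.incrementAndGet();
            throw new IllegalStateException(
                "data too large: would use [" + newUsed + "] bytes, limit [" + memoryBytesLimit + "]");
        }
        return newUsed;
    }

    public long getUsed() { return used.get(); }
    public long getTrippedCount() { return trippedCount.get(); }
}
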
@@ -19,19 +19,17 @@
 package org.elasticsearch.common.component;

+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.logging.DeprecationLogger;
-import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.node.Node;

-/**
- *
- */
 public abstract class AbstractComponent {

-    protected final ESLogger logger;
+    protected final Logger logger;
     protected final DeprecationLogger deprecationLogger;
     protected final Settings settings;

@@ -42,7 +40,7 @@ public abstract class AbstractComponent {
     }

     public AbstractComponent(Settings settings, Class customClass) {
-        this.logger = Loggers.getLogger(customClass, settings);
+        this.logger = LogManager.getLogger(customClass);
         this.deprecationLogger = new DeprecationLogger(logger);
         this.settings = settings;
     }

@@ -71,4 +69,5 @@ public abstract class AbstractComponent {
             deprecationLogger.deprecated("Setting [{}] has been removed, use [{}] instead", settingName, alternativeName);
         }
     }
 }
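Editor's note: the practical effect for subclasses is that the inherited logger field is now a plain Log4j 2 Logger resolved from the class alone; settings are no longer consulted when the logger is created. A hedged sketch of a consumer, assuming the one-argument AbstractComponent(Settings) constructor implied above (the component itself is made up):

import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;

public class ExampleComponent extends AbstractComponent {
    public ExampleComponent(Settings settings) {
        super(settings);
        // `logger` is now org.apache.logging.log4j.Logger; parameterized
        // messages and the inherited deprecationLogger work as before.
        logger.debug("example component constructed");
    }
}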

View File

@@ -19,20 +19,15 @@
 package org.elasticsearch.common.geo.builders;

-import org.locationtech.spatial4j.context.jts.JtsSpatialContext;
-import org.locationtech.spatial4j.exception.InvalidShapeException;
-import org.locationtech.spatial4j.shape.Shape;
-import org.locationtech.spatial4j.shape.jts.JtsGeometry;
 import com.vividsolutions.jts.geom.Coordinate;
 import com.vividsolutions.jts.geom.Geometry;
 import com.vividsolutions.jts.geom.GeometryFactory;
+import org.apache.logging.log4j.Logger;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.action.support.ToXContentToBytes;
 import org.elasticsearch.common.io.stream.NamedWriteable;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.logging.ESLoggerFactory;
 import org.elasticsearch.common.unit.DistanceUnit.Distance;
 import org.elasticsearch.common.xcontent.ToXContent;
@@ -40,6 +35,10 @@ import org.elasticsearch.common.xcontent.XContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.mapper.GeoShapeFieldMapper;
+import org.locationtech.spatial4j.context.jts.JtsSpatialContext;
+import org.locationtech.spatial4j.exception.InvalidShapeException;
+import org.locationtech.spatial4j.shape.Shape;
+import org.locationtech.spatial4j.shape.jts.JtsGeometry;

 import java.io.IOException;
 import java.util.ArrayList;
@@ -53,7 +52,7 @@ import java.util.Locale;
  */
 public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWriteable {

-    protected static final ESLogger LOGGER = ESLoggerFactory.getLogger(ShapeBuilder.class.getName());
+    protected static final Logger LOGGER = ESLoggerFactory.getLogger(ShapeBuilder.class.getName());

     private static final boolean DEBUG;
     static {
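Editor's note: ESLoggerFactory keeps its call shape here but now hands back the Log4j 2 type. A small sketch of the same static-logger idiom, paired with the assertions-driven DEBUG flag this class uses (the helper class and messages are hypothetical):

import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.ESLoggerFactory;

public class ExampleGeoHelper {
    protected static final Logger LOGGER = ESLoggerFactory.getLogger(ExampleGeoHelper.class.getName());

    private static final boolean DEBUG;
    static {
        // Mirrors ShapeBuilder: debug output only when assertions are enabled (-ea)
        boolean debug = false;
        assert debug = true;
        DEBUG = debug;
    }

    static void logPoint(double lon, double lat) {
        if (DEBUG) {
            LOGGER.debug("point at [{}, {}]", lon, lat);
        }
    }
}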

View File

@@ -16,6 +16,7 @@
  */
 package org.elasticsearch.common.inject.spi;

+import org.apache.logging.log4j.Logger;
 import org.elasticsearch.common.inject.AbstractModule;
 import org.elasticsearch.common.inject.Binder;
 import org.elasticsearch.common.inject.Binding;
@@ -40,7 +41,6 @@ import org.elasticsearch.common.inject.internal.PrivateElementsImpl;
 import org.elasticsearch.common.inject.internal.ProviderMethodsModule;
 import org.elasticsearch.common.inject.internal.SourceProvider;
 import org.elasticsearch.common.inject.matcher.Matcher;
-import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.logging.Loggers;

 import java.lang.annotation.Annotation;
@@ -351,7 +351,7 @@ public final class Elements {
         return builder;
     }

-    private static ESLogger logger = Loggers.getLogger(Elements.class);
+    private static Logger logger = Loggers.getLogger(Elements.class);

     protected Object getSource() {
         Object ret;
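Editor's note: Loggers.getLogger likewise keeps its signature while returning the Log4j 2 type, so only the declared field type changes, as the hunk above confirms. A minimal sketch (class and message invented for illustration):

import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.Loggers;

public class ExampleElementsClient {
    // Same factory call as before; only the declared type moved to Log4j 2
    private static final Logger logger = Loggers.getLogger(ExampleElementsClient.class);

    void record(String binding) {
        logger.debug("recorded element [{}]", binding);
    }
}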

Some files were not shown because too many files have changed in this diff.