Merge branch 'master' into rolling_upgrades

Ryan Ernst 2016-09-06 15:52:03 -07:00
commit e6ac27fcfa
631 changed files with 8101 additions and 4692 deletions

.gitignore

@ -20,6 +20,11 @@ nbactions.xml
.gradle/
build/
# gradle wrapper
/gradle/
gradlew
gradlew.bat
# maven stuff (to be removed when trunk becomes 4.x)
*-execution-hints.log
target/


@ -18,6 +18,7 @@
*/
package org.elasticsearch.gradle.doc
import org.elasticsearch.gradle.VersionProperties
import org.elasticsearch.gradle.test.RestTestPlugin
import org.gradle.api.Project
import org.gradle.api.Task
@ -30,9 +31,19 @@ public class DocsTestPlugin extends RestTestPlugin {
@Override
public void apply(Project project) {
super.apply(project)
Map<String, String> defaultSubstitutions = [
/* These match up with the asciidoc syntax for substitutions but
* the values may differ. In particular {version} needs to resolve
* to the version being built for testing but needs to resolve to
* the last released version for docs. */
'\\{version\\}':
VersionProperties.elasticsearch.replace('-SNAPSHOT', ''),
'\\{lucene_version\\}' : VersionProperties.lucene,
]
Task listSnippets = project.tasks.create('listSnippets', SnippetsTask)
listSnippets.group 'Docs'
listSnippets.description 'List each snippet'
listSnippets.defaultSubstitutions = defaultSubstitutions
listSnippets.perSnippet { println(it.toString()) }
Task listConsoleCandidates = project.tasks.create(
@ -40,6 +51,7 @@ public class DocsTestPlugin extends RestTestPlugin {
listConsoleCandidates.group 'Docs'
listConsoleCandidates.description
'List snippets that probably should be marked // CONSOLE'
listConsoleCandidates.defaultSubstitutions = defaultSubstitutions
listConsoleCandidates.perSnippet {
if (
it.console != null // Already marked, nothing to do
@ -47,19 +59,17 @@ public class DocsTestPlugin extends RestTestPlugin {
) {
return
}
List<String> languages = [
// This language should almost always be marked console
'js',
// These are often curl commands that should be converted but
// are probably false positives
'sh', 'shell',
]
if (false == languages.contains(it.language)) {
return
if ( // js almost always should be `// CONSOLE`
it.language == 'js' ||
// snippets containing `curl` *probably* should
// be `// CONSOLE`
it.curl) {
println(it.toString())
}
println(it.toString())
}
project.tasks.create('buildRestTests', RestTestsFromSnippetsTask)
Task buildRestTests = project.tasks.create(
'buildRestTests', RestTestsFromSnippetsTask)
buildRestTests.defaultSubstitutions = defaultSubstitutions
}
}
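Note: a minimal illustration (not part of the build) of how the default substitutions above behave once SnippetsTask applies them with replaceAll. The keys are regular expressions, and {version} resolves to the version under test with any -SNAPSHOT suffix stripped; the concrete version strings below are assumptions for the example.

public class SubstitutionSketch {
    public static void main(String[] args) {
        String elasticsearch = "5.0.0-SNAPSHOT"; // hypothetical VersionProperties.elasticsearch
        String lucene = "6.2.0";                 // hypothetical VersionProperties.lucene
        String snippet = "Elasticsearch {version} is built on Lucene {lucene_version}.";
        snippet = snippet.replaceAll("\\{version\\}", elasticsearch.replace("-SNAPSHOT", ""));
        snippet = snippet.replaceAll("\\{lucene_version\\}", lucene);
        System.out.println(snippet); // Elasticsearch 5.0.0 is built on Lucene 6.2.0.
    }
}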


@ -146,6 +146,9 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
void emitDo(String method, String pathAndQuery, String body,
String catchPart, List warnings, boolean inSetup) {
def (String path, String query) = pathAndQuery.tokenize('?')
if (path == null) {
path = '' // Catch requests to the root...
}
current.println(" - do:")
if (catchPart != null) {
current.println(" catch: $catchPart")


@ -22,6 +22,7 @@ package org.elasticsearch.gradle.doc
import org.gradle.api.DefaultTask
import org.gradle.api.InvalidUserDataException
import org.gradle.api.file.ConfigurableFileTree
import org.gradle.api.tasks.Input
import org.gradle.api.tasks.InputFiles
import org.gradle.api.tasks.TaskAction
@ -60,6 +61,12 @@ public class SnippetsTask extends DefaultTask {
exclude 'build'
}
/**
* Substitutions done on every snippet's contents.
*/
@Input
Map<String, String> defaultSubstitutions = [:]
@TaskAction
public void executeTask() {
/*
@ -75,21 +82,39 @@ public class SnippetsTask extends DefaultTask {
Closure emit = {
snippet.contents = contents.toString()
contents = null
Closure doSubstitution = { String pattern, String subst ->
/*
* $body is really common but it looks like a
* backreference so we just escape it here to make the
* tests cleaner.
*/
subst = subst.replace('$body', '\\$body')
// \n is a new line....
subst = subst.replace('\\n', '\n')
snippet.contents = snippet.contents.replaceAll(
pattern, subst)
}
defaultSubstitutions.each doSubstitution
if (substitutions != null) {
substitutions.each { String pattern, String subst ->
/*
* $body is really common but it looks like a
* backreference so we just escape it here to make the
* tests cleaner.
*/
subst = subst.replace('$body', '\\$body')
// \n is a new line....
subst = subst.replace('\\n', '\n')
snippet.contents = snippet.contents.replaceAll(
pattern, subst)
}
substitutions.each doSubstitution
substitutions = null
}
if (snippet.language == null) {
throw new InvalidUserDataException("$snippet: "
+ "Snippet missing a language. This is required by "
+ "Elasticsearch's doc testing infrastructure so we "
+ "be sure we don't accidentally forget to test a "
+ "snippet.")
}
// Try to detect snippets that contain `curl`
if (snippet.language == 'sh' || snippet.language == 'shell') {
snippet.curl = snippet.contents.contains('curl')
if (snippet.console == false && snippet.curl == false) {
throw new InvalidUserDataException("$snippet: "
+ "No need for NOTCONSOLE if snippet doesn't "
+ "contain `curl`.")
}
}
perSnippet(snippet)
snippet = null
}
@ -107,7 +132,7 @@ public class SnippetsTask extends DefaultTask {
}
return
}
matcher = line =~ /\[source,(\w+)]\s*/
matcher = line =~ /\["?source"?,\s*"?(\w+)"?(,.*)?].*/
if (matcher.matches()) {
lastLanguage = matcher.group(1)
lastLanguageLine = lineNumber
@ -250,6 +275,7 @@ public class SnippetsTask extends DefaultTask {
String language = null
String catchPart = null
String setup = null
boolean curl
List warnings = new ArrayList()
@Override
@ -285,6 +311,9 @@ public class SnippetsTask extends DefaultTask {
if (testSetup) {
result += '// TESTSETUP'
}
if (curl) {
result += '(curl)'
}
return result
}
}
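Note: a small standalone check (not part of the task) of what the widened source-header regex above accepts. The old pattern only matched the bare [source,lang] form; the new one also tolerates quoting, whitespace after the comma, and trailing attributes. The sample lines are made up for the example.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class SourceHeaderSketch {
    public static void main(String[] args) {
        // Same pattern as above, written as a Java string literal.
        Pattern source = Pattern.compile("\\[\"?source\"?,\\s*\"?(\\w+)\"?(,.*)?].*");
        String[] lines = {
            "[source,js]",                                       // matched by the old and new pattern
            "[source, console]",                                 // whitespace after the comma: new pattern only
            "[\"source\",\"js\",subs=\"attributes,callouts\"]",  // quoted form with extra attributes: new pattern only
        };
        for (String line : lines) {
            Matcher m = source.matcher(line);
            System.out.println(line + " -> " + (m.matches() ? m.group(1) : "no match"));
        }
    }
}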


@ -97,8 +97,8 @@ public class PluginBuildPlugin extends BuildPlugin {
// with a full elasticsearch server that includes optional deps
provided "org.locationtech.spatial4j:spatial4j:${project.versions.spatial4j}"
provided "com.vividsolutions:jts:${project.versions.jts}"
provided "log4j:log4j:${project.versions.log4j}"
provided "log4j:apache-log4j-extras:${project.versions.log4j}"
provided "org.apache.logging.log4j:log4j-api:${project.versions.log4j}"
provided "org.apache.logging.log4j:log4j-core:${project.versions.log4j}"
provided "net.java.dev.jna:jna:${project.versions.jna}"
}
}


@ -59,7 +59,8 @@ class PrecommitTasks {
* use the NamingConventionsCheck we break the circular dependency
* here.
*/
precommitTasks.add(configureLoggerUsage(project))
// https://github.com/elastic/elasticsearch/issues/20243
// precommitTasks.add(configureLoggerUsage(project))
}


@ -10,6 +10,9 @@
<suppress files="org[/\\]elasticsearch[/\\]painless[/\\]antlr[/\\]PainlessLexer\.java" checks="." />
<suppress files="org[/\\]elasticsearch[/\\]painless[/\\]antlr[/\\]PainlessParser(|BaseVisitor|Visitor)\.java" checks="." />
<!-- ThrowableProxy is a forked copy from Log4j to hack around a bug; this can be removed when the hack is removed -->
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]apache[/\\]logging[/\\]log4j[/\\]core[/\\]impl[/\\]ThrowableProxy.java" checks="RegexpSinglelineJava" />
<!-- Hopefully temporary suppression of LineLength on files that don't pass it. We should remove these when we the
files start to pass. -->
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]apache[/\\]lucene[/\\]queries[/\\]BlendedTermQuery.java" checks="LineLength" />
@ -21,7 +24,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]health[/\\]ClusterHealthRequestBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]health[/\\]TransportClusterHealthAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]node[/\\]hotthreads[/\\]NodesHotThreadsRequestBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]node[/\\]info[/\\]NodeInfo.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]node[/\\]stats[/\\]NodesStatsRequestBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]repositories[/\\]delete[/\\]DeleteRepositoryRequestBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]repositories[/\\]delete[/\\]TransportDeleteRepositoryAction.java" checks="LineLength" />
@ -301,7 +303,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]network[/\\]NetworkModule.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]network[/\\]NetworkService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]recycler[/\\]Recyclers.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]unit[/\\]ByteSizeValue.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]BigArrays.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]CancellableThreads.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]CollectionUtils.java" checks="LineLength" />
@ -386,7 +387,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]MapperService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]Mapping.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]MetadataFieldMapper.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]ParsedDocument.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]CompletionFieldMapper.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]LegacyDateFieldMapper.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]LegacyDoubleFieldMapper.java" checks="LineLength" />
@ -469,9 +469,7 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]jvm[/\\]JvmStats.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]node[/\\]Node.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]node[/\\]internal[/\\]InternalSettingsPreparer.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]DummyPluginInfo.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]PluginsService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]RemovePluginCommand.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]RepositoriesService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]Repository.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]RepositoryModule.java" checks="LineLength" />
@ -627,7 +625,7 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]SimulatePipelineResponseTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]WriteableIngestDocumentTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]SearchRequestBuilderTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]AutoCreateIndexTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]IndicesOptionsTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]IndicesOptionsTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]TransportActionFilterChainTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]WaitActiveShardCountIT.java" checks="LineLength" />
@ -865,7 +863,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]CorruptedFileIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]CorruptedTranslogIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]DirectoryUtilsTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]ExceptionRetryIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]IndexStoreTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]StoreTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]suggest[/\\]stats[/\\]SuggestStatsIT.java" checks="LineLength" />
@ -905,8 +902,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]template[/\\]SimpleIndexTemplateIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]mget[/\\]SimpleMgetIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]jvm[/\\]JvmGcMonitorServiceSettingsTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]os[/\\]OsProbeTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]nodesinfo[/\\]NodeInfoStreamingTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]PluginInfoTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]PluginsServiceTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]recovery[/\\]FullRollingRestartIT.java" checks="LineLength" />
@ -994,7 +989,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]ESBlobStoreRepositoryIntegTestCase.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]geo[/\\]RandomShapeGenerator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]hamcrest[/\\]ElasticsearchGeoAssertions.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]threadpool[/\\]ThreadPoolSerializationTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]timestamp[/\\]SimpleTimestampIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]tribe[/\\]TribeIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]ttl[/\\]SimpleTTLIT.java" checks="LineLength" />


@ -6,7 +6,7 @@ spatial4j = 0.6
jts = 1.13
jackson = 2.8.1
snakeyaml = 1.15
log4j = 1.2.17
log4j = 2.6.2
slf4j = 1.6.2
jna = 4.2.2


@ -18,13 +18,13 @@
*/
package org.elasticsearch.client.benchmark.ops.bulk;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.client.benchmark.BenchmarkTask;
import org.elasticsearch.client.benchmark.metrics.Sample;
import org.elasticsearch.client.benchmark.metrics.SampleRecorder;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import java.io.BufferedReader;
@ -135,7 +135,7 @@ public class BulkBenchmarkTask implements BenchmarkTask {
private static final class BulkIndexer implements Runnable {
private static final ESLogger logger = ESLoggerFactory.getLogger(BulkIndexer.class.getName());
private static final Logger logger = ESLoggerFactory.getLogger(BulkIndexer.class.getName());
private final BlockingQueue<List<String>> bulkData;
private final int warmupIterations;


@ -85,8 +85,10 @@ dependencies {
compile "com.vividsolutions:jts:${versions.jts}", optional
// logging
compile "log4j:log4j:${versions.log4j}", optional
compile "log4j:apache-log4j-extras:${versions.log4j}", optional
compile "org.apache.logging.log4j:log4j-api:${versions.log4j}", optional
compile "org.apache.logging.log4j:log4j-core:${versions.log4j}", optional
// to bridge dependencies that are still on Log4j 1 to Log4j 2
compile "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}", optional
compile "net.java.dev.jna:jna:${versions.jna}"
@ -154,32 +156,94 @@ thirdPartyAudit.excludes = [
// classes are missing!
// from com.fasterxml.jackson.dataformat.yaml.YAMLMapper (jackson-dataformat-yaml)
'com.fasterxml.jackson.databind.ObjectMapper',
'com.fasterxml.jackson.databind.ObjectMapper',
// from org.apache.log4j.receivers.net.JMSReceiver (log4j-extras)
'javax.jms.Message',
'javax.jms.MessageListener',
'javax.jms.ObjectMessage',
'javax.jms.TopicConnection',
'javax.jms.TopicConnectionFactory',
'javax.jms.TopicPublisher',
'javax.jms.TopicSession',
'javax.jms.TopicSubscriber',
// from log4j
'com.fasterxml.jackson.annotation.JsonInclude$Include',
'com.fasterxml.jackson.databind.DeserializationContext',
'com.fasterxml.jackson.databind.JsonMappingException',
'com.fasterxml.jackson.databind.JsonNode',
'com.fasterxml.jackson.databind.Module$SetupContext',
'com.fasterxml.jackson.databind.ObjectReader',
'com.fasterxml.jackson.databind.ObjectWriter',
'com.fasterxml.jackson.databind.SerializerProvider',
'com.fasterxml.jackson.databind.deser.std.StdDeserializer',
'com.fasterxml.jackson.databind.deser.std.StdScalarDeserializer',
'com.fasterxml.jackson.databind.module.SimpleModule',
'com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter',
'com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider',
'com.fasterxml.jackson.databind.ser.std.StdScalarSerializer',
'com.fasterxml.jackson.databind.ser.std.StdSerializer',
'com.fasterxml.jackson.dataformat.xml.JacksonXmlModule',
'com.fasterxml.jackson.dataformat.xml.XmlMapper',
'com.fasterxml.jackson.dataformat.xml.util.DefaultXmlPrettyPrinter',
'com.lmax.disruptor.BlockingWaitStrategy',
'com.lmax.disruptor.BusySpinWaitStrategy',
'com.lmax.disruptor.EventFactory',
'com.lmax.disruptor.EventTranslator',
'com.lmax.disruptor.EventTranslatorTwoArg',
'com.lmax.disruptor.EventTranslatorVararg',
'com.lmax.disruptor.ExceptionHandler',
'com.lmax.disruptor.LifecycleAware',
'com.lmax.disruptor.RingBuffer',
'com.lmax.disruptor.Sequence',
'com.lmax.disruptor.SequenceReportingEventHandler',
'com.lmax.disruptor.SleepingWaitStrategy',
'com.lmax.disruptor.TimeoutBlockingWaitStrategy',
'com.lmax.disruptor.WaitStrategy',
'com.lmax.disruptor.YieldingWaitStrategy',
'com.lmax.disruptor.dsl.Disruptor',
'com.lmax.disruptor.dsl.ProducerType',
'javax.jms.Connection',
'javax.jms.ConnectionFactory',
'javax.jms.Destination',
'javax.jms.Message',
'javax.jms.MessageConsumer',
'javax.jms.MessageListener',
'javax.jms.MessageProducer',
'javax.jms.ObjectMessage',
'javax.jms.Session',
'javax.mail.Authenticator',
'javax.mail.Message$RecipientType',
'javax.mail.PasswordAuthentication',
'javax.mail.Session',
'javax.mail.Transport',
'javax.mail.internet.InternetAddress',
'javax.mail.internet.InternetHeaders',
'javax.mail.internet.MimeBodyPart',
'javax.mail.internet.MimeMessage',
'javax.mail.internet.MimeMultipart',
'javax.mail.internet.MimeUtility',
'javax.mail.util.ByteArrayDataSource',
'javax.persistence.AttributeConverter',
'javax.persistence.EntityManager',
'javax.persistence.EntityManagerFactory',
'javax.persistence.EntityTransaction',
'javax.persistence.Persistence',
'javax.persistence.PersistenceException',
'org.apache.commons.compress.compressors.CompressorStreamFactory',
'org.apache.commons.compress.utils.IOUtils',
'org.apache.commons.csv.CSVFormat',
'org.apache.commons.csv.QuoteMode',
'org.apache.kafka.clients.producer.KafkaProducer',
'org.apache.kafka.clients.producer.Producer',
'org.apache.kafka.clients.producer.ProducerRecord',
'org.codehaus.stax2.XMLStreamWriter2',
'org.osgi.framework.AdaptPermission',
'org.osgi.framework.AdminPermission',
'org.osgi.framework.Bundle',
'org.osgi.framework.BundleActivator',
'org.osgi.framework.BundleContext',
'org.osgi.framework.BundleEvent',
'org.osgi.framework.BundleReference',
'org.osgi.framework.FrameworkUtil',
'org.osgi.framework.SynchronousBundleListener',
'org.osgi.framework.wiring.BundleWire',
'org.osgi.framework.wiring.BundleWiring',
'org.zeromq.ZMQ$Context',
'org.zeromq.ZMQ$Socket',
'org.zeromq.ZMQ',
// from org.apache.log4j.net.SMTPAppender (log4j)
'javax.mail.Authenticator',
'javax.mail.Message$RecipientType',
'javax.mail.Message',
'javax.mail.Multipart',
'javax.mail.PasswordAuthentication',
'javax.mail.Session',
'javax.mail.Transport',
'javax.mail.internet.InternetAddress',
'javax.mail.internet.InternetHeaders',
'javax.mail.internet.MimeBodyPart',
'javax.mail.internet.MimeMessage',
'javax.mail.internet.MimeMultipart',
'javax.mail.internet.MimeUtility',
// from org.locationtech.spatial4j.io.GeoJSONReader (spatial4j)
'org.noggit.JSONParser',
]


@ -1,37 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.log4j;
import org.apache.log4j.helpers.ThreadLocalMap;
/**
* Log4j 1.2 MDC breaks because it parses java.version incorrectly (does not handle new java9 versioning).
*
* This hack fixes up the pkg private members as if it had detected the java version correctly.
*/
public class Java9Hack {
public static void fixLog4j() {
if (MDC.mdc.tlm == null) {
MDC.mdc.java1 = false;
MDC.mdc.tlm = new ThreadLocalMap();
}
}
}


@ -1,23 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Hack to fix Log4j 1.2 in Java 9.
*/
package org.apache.log4j;


@ -0,0 +1,665 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache license, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the license for the specific language governing permissions and
* limitations under the license.
*/
package org.apache.logging.log4j.core.impl;
import java.io.Serializable;
import java.net.URL;
import java.security.CodeSource;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Stack;
import org.apache.logging.log4j.core.util.Loader;
import org.apache.logging.log4j.status.StatusLogger;
import org.apache.logging.log4j.util.ReflectionUtil;
import org.apache.logging.log4j.util.Strings;
/**
* Wraps a Throwable to add packaging information about each stack trace element.
*
* <p>
* A proxy is used to represent a throwable that may not exist in a different class loader or JVM. When an application
* deserializes a ThrowableProxy, the throwable may not be set, but the throwable's information is preserved in other
* fields of the proxy like the message and stack trace.
* </p>
*
* <p>
* TODO: Move this class to org.apache.logging.log4j.core because it is used from LogEvent.
* </p>
* <p>
* TODO: Deserialize: Try to rebuild Throwable if the target exception is in this class loader?
* </p>
*/
public class ThrowableProxy implements Serializable {
private static final String CAUSED_BY_LABEL = "Caused by: ";
private static final String SUPPRESSED_LABEL = "Suppressed: ";
private static final String WRAPPED_BY_LABEL = "Wrapped by: ";
/**
* Cached StackTracePackageElement and ClassLoader.
* <p>
* Consider this class private.
* </p>
*/
static class CacheEntry {
private final ExtendedClassInfo element;
private final ClassLoader loader;
public CacheEntry(final ExtendedClassInfo element, final ClassLoader loader) {
this.element = element;
this.loader = loader;
}
}
private static final ThrowableProxy[] EMPTY_THROWABLE_PROXY_ARRAY = new ThrowableProxy[0];
private static final char EOL = '\n';
private static final long serialVersionUID = -2752771578252251910L;
private final ThrowableProxy causeProxy;
private int commonElementCount;
private final ExtendedStackTraceElement[] extendedStackTrace;
private final String localizedMessage;
private final String message;
private final String name;
private final ThrowableProxy[] suppressedProxies;
private final transient Throwable throwable;
/**
* For JSON and XML IO via Jackson.
*/
@SuppressWarnings("unused")
private ThrowableProxy() {
this.throwable = null;
this.name = null;
this.extendedStackTrace = null;
this.causeProxy = null;
this.message = null;
this.localizedMessage = null;
this.suppressedProxies = EMPTY_THROWABLE_PROXY_ARRAY;
}
/**
* Constructs the wrapper for the Throwable that includes packaging data.
*
* @param throwable
* The Throwable to wrap, must not be null.
*/
public ThrowableProxy(final Throwable throwable) {
this(throwable, null);
}
/**
* Constructs the wrapper for the Throwable that includes packaging data.
*
* @param throwable
* The Throwable to wrap, must not be null.
* @param visited
* The set of visited suppressed exceptions.
*/
private ThrowableProxy(final Throwable throwable, final Set<Throwable> visited) {
this.throwable = throwable;
this.name = throwable.getClass().getName();
this.message = throwable.getMessage();
this.localizedMessage = throwable.getLocalizedMessage();
final Map<String, CacheEntry> map = new HashMap<>();
final Stack<Class<?>> stack = ReflectionUtil.getCurrentStackTrace();
this.extendedStackTrace = this.toExtendedStackTrace(stack, map, null, throwable.getStackTrace());
final Throwable throwableCause = throwable.getCause();
final Set<Throwable> causeVisited = new HashSet<>(1);
this.causeProxy = throwableCause == null ? null : new ThrowableProxy(throwable, stack, map, throwableCause, visited, causeVisited);
this.suppressedProxies = this.toSuppressedProxies(throwable, visited);
}
/**
* Constructs the wrapper for a Throwable that is referenced as the cause by another Throwable.
*
* @param parent
* The Throwable referencing this Throwable.
* @param stack
* The Class stack.
* @param map
* The cache containing the packaging data.
* @param cause
* The Throwable to wrap.
* @param suppressedVisited TODO
* @param causeVisited TODO
*/
private ThrowableProxy(final Throwable parent, final Stack<Class<?>> stack, final Map<String, CacheEntry> map,
final Throwable cause, final Set<Throwable> suppressedVisited, final Set<Throwable> causeVisited) {
causeVisited.add(cause);
this.throwable = cause;
this.name = cause.getClass().getName();
this.message = this.throwable.getMessage();
this.localizedMessage = this.throwable.getLocalizedMessage();
this.extendedStackTrace = this.toExtendedStackTrace(stack, map, parent.getStackTrace(), cause.getStackTrace());
final Throwable causeCause = cause.getCause();
this.causeProxy = causeCause == null || causeVisited.contains(causeCause) ? null : new ThrowableProxy(parent,
stack, map, causeCause, suppressedVisited, causeVisited);
this.suppressedProxies = this.toSuppressedProxies(cause, suppressedVisited);
}
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (this.getClass() != obj.getClass()) {
return false;
}
final ThrowableProxy other = (ThrowableProxy) obj;
if (this.causeProxy == null) {
if (other.causeProxy != null) {
return false;
}
} else if (!this.causeProxy.equals(other.causeProxy)) {
return false;
}
if (this.commonElementCount != other.commonElementCount) {
return false;
}
if (this.name == null) {
if (other.name != null) {
return false;
}
} else if (!this.name.equals(other.name)) {
return false;
}
if (!Arrays.equals(this.extendedStackTrace, other.extendedStackTrace)) {
return false;
}
if (!Arrays.equals(this.suppressedProxies, other.suppressedProxies)) {
return false;
}
return true;
}
private void formatCause(final StringBuilder sb, final String prefix, final ThrowableProxy cause, final List<String> ignorePackages) {
formatThrowableProxy(sb, prefix, CAUSED_BY_LABEL, cause, ignorePackages);
}
private void formatThrowableProxy(final StringBuilder sb, final String prefix, final String causeLabel,
final ThrowableProxy throwableProxy, final List<String> ignorePackages) {
if (throwableProxy == null) {
return;
}
sb.append(prefix).append(causeLabel).append(throwableProxy).append(EOL);
this.formatElements(sb, prefix, throwableProxy.commonElementCount,
throwableProxy.getStackTrace(), throwableProxy.extendedStackTrace, ignorePackages);
this.formatSuppressed(sb, prefix + "\t", throwableProxy.suppressedProxies, ignorePackages);
this.formatCause(sb, prefix, throwableProxy.causeProxy, ignorePackages);
}
private void formatSuppressed(final StringBuilder sb, final String prefix, final ThrowableProxy[] suppressedProxies,
final List<String> ignorePackages) {
if (suppressedProxies == null) {
return;
}
for (final ThrowableProxy suppressedProxy : suppressedProxies) {
final ThrowableProxy cause = suppressedProxy;
formatThrowableProxy(sb, prefix, SUPPRESSED_LABEL, cause, ignorePackages);
}
}
private void formatElements(final StringBuilder sb, final String prefix, final int commonCount,
final StackTraceElement[] causedTrace, final ExtendedStackTraceElement[] extStackTrace,
final List<String> ignorePackages) {
if (ignorePackages == null || ignorePackages.isEmpty()) {
for (final ExtendedStackTraceElement element : extStackTrace) {
this.formatEntry(element, sb, prefix);
}
} else {
int count = 0;
for (int i = 0; i < extStackTrace.length; ++i) {
if (!this.ignoreElement(causedTrace[i], ignorePackages)) {
if (count > 0) {
appendSuppressedCount(sb, prefix, count);
count = 0;
}
this.formatEntry(extStackTrace[i], sb, prefix);
} else {
++count;
}
}
if (count > 0) {
appendSuppressedCount(sb, prefix, count);
}
}
if (commonCount != 0) {
sb.append(prefix).append("\t... ").append(commonCount).append(" more").append(EOL);
}
}
private void appendSuppressedCount(final StringBuilder sb, final String prefix, final int count) {
sb.append(prefix);
if (count == 1) {
sb.append("\t....").append(EOL);
} else {
sb.append("\t... suppressed ").append(count).append(" lines").append(EOL);
}
}
private void formatEntry(final ExtendedStackTraceElement extStackTraceElement, final StringBuilder sb, final String prefix) {
sb.append(prefix);
sb.append("\tat ");
sb.append(extStackTraceElement);
sb.append(EOL);
}
/**
* Formats the specified Throwable.
*
* @param sb
* StringBuilder to contain the formatted Throwable.
* @param cause
* The Throwable to format.
*/
public void formatWrapper(final StringBuilder sb, final ThrowableProxy cause) {
this.formatWrapper(sb, cause, null);
}
/**
* Formats the specified Throwable.
*
* @param sb
* StringBuilder to contain the formatted Throwable.
* @param cause
* The Throwable to format.
* @param packages
* The List of packages to be suppressed from the trace.
*/
@SuppressWarnings("ThrowableResultOfMethodCallIgnored")
public void formatWrapper(final StringBuilder sb, final ThrowableProxy cause, final List<String> packages) {
final Throwable caused = cause.getCauseProxy() != null ? cause.getCauseProxy().getThrowable() : null;
if (caused != null) {
this.formatWrapper(sb, cause.causeProxy);
sb.append(WRAPPED_BY_LABEL);
}
sb.append(cause).append(EOL);
this.formatElements(sb, "", cause.commonElementCount,
cause.getThrowable().getStackTrace(), cause.extendedStackTrace, packages);
}
public ThrowableProxy getCauseProxy() {
return this.causeProxy;
}
/**
* Format the Throwable that is the cause of this Throwable.
*
* @return The formatted Throwable that caused this Throwable.
*/
public String getCauseStackTraceAsString() {
return this.getCauseStackTraceAsString(null);
}
/**
* Format the Throwable that is the cause of this Throwable.
*
* @param packages
* The List of packages to be suppressed from the trace.
* @return The formatted Throwable that caused this Throwable.
*/
public String getCauseStackTraceAsString(final List<String> packages) {
final StringBuilder sb = new StringBuilder();
if (this.causeProxy != null) {
this.formatWrapper(sb, this.causeProxy);
sb.append(WRAPPED_BY_LABEL);
}
sb.append(this.toString());
sb.append(EOL);
this.formatElements(sb, "", 0, this.throwable.getStackTrace(), this.extendedStackTrace, packages);
return sb.toString();
}
/**
* Return the number of elements that are being omitted because they are common with the parent Throwable's stack
* trace.
*
* @return The number of elements omitted from the stack trace.
*/
public int getCommonElementCount() {
return this.commonElementCount;
}
/**
* Gets the stack trace including packaging information.
*
* @return The stack trace including packaging information.
*/
public ExtendedStackTraceElement[] getExtendedStackTrace() {
return this.extendedStackTrace;
}
/**
* Format the stack trace including packaging information.
*
* @return The formatted stack trace including packaging information.
*/
public String getExtendedStackTraceAsString() {
return this.getExtendedStackTraceAsString(null);
}
/**
* Format the stack trace including packaging information.
*
* @param ignorePackages
* List of packages to be ignored in the trace.
* @return The formatted stack trace including packaging information.
*/
public String getExtendedStackTraceAsString(final List<String> ignorePackages) {
final StringBuilder sb = new StringBuilder(this.name);
final String msg = this.message;
if (msg != null) {
sb.append(": ").append(msg);
}
sb.append(EOL);
final StackTraceElement[] causedTrace = this.throwable != null ? this.throwable.getStackTrace() : null;
this.formatElements(sb, "", 0, causedTrace, this.extendedStackTrace, ignorePackages);
this.formatSuppressed(sb, "\t", this.suppressedProxies, ignorePackages);
this.formatCause(sb, "", this.causeProxy, ignorePackages);
return sb.toString();
}
public String getLocalizedMessage() {
return this.localizedMessage;
}
public String getMessage() {
return this.message;
}
/**
* Return the FQCN of the Throwable.
*
* @return The FQCN of the Throwable.
*/
public String getName() {
return this.name;
}
public StackTraceElement[] getStackTrace() {
return this.throwable == null ? null : this.throwable.getStackTrace();
}
/**
* Gets proxies for suppressed exceptions.
*
* @return proxies for suppressed exceptions.
*/
public ThrowableProxy[] getSuppressedProxies() {
return this.suppressedProxies;
}
/**
* Format the suppressed Throwables.
*
* @return The formatted suppressed Throwables.
*/
public String getSuppressedStackTrace() {
final ThrowableProxy[] suppressed = this.getSuppressedProxies();
if (suppressed == null || suppressed.length == 0) {
return Strings.EMPTY;
}
final StringBuilder sb = new StringBuilder("Suppressed Stack Trace Elements:").append(EOL);
for (final ThrowableProxy proxy : suppressed) {
sb.append(proxy.getExtendedStackTraceAsString());
}
return sb.toString();
}
/**
* The throwable or null if this object is deserialized from XML or JSON.
*
* @return The throwable or null if this object is deserialized from XML or JSON.
*/
public Throwable getThrowable() {
return this.throwable;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + (this.causeProxy == null ? 0 : this.causeProxy.hashCode());
result = prime * result + this.commonElementCount;
result = prime * result + (this.extendedStackTrace == null ? 0 : Arrays.hashCode(this.extendedStackTrace));
result = prime * result + (this.suppressedProxies == null ? 0 : Arrays.hashCode(this.suppressedProxies));
result = prime * result + (this.name == null ? 0 : this.name.hashCode());
return result;
}
private boolean ignoreElement(final StackTraceElement element, final List<String> ignorePackages) {
final String className = element.getClassName();
for (final String pkg : ignorePackages) {
if (className.startsWith(pkg)) {
return true;
}
}
return false;
}
/**
* Loads classes not located via Reflection.getCallerClass.
*
* @param lastLoader
* The ClassLoader that loaded the Class that called this Class.
* @param className
* The name of the Class.
* @return The Class object for the Class or null if it could not be located.
*/
private Class<?> loadClass(final ClassLoader lastLoader, final String className) {
// XXX: this is overly complicated
Class<?> clazz;
if (lastLoader != null) {
try {
clazz = Loader.initializeClass(className, lastLoader);
if (clazz != null) {
return clazz;
}
} catch (final Throwable ignore) {
// Ignore exception.
}
}
try {
clazz = Loader.loadClass(className);
} catch (final ClassNotFoundException ignored) {
return initializeClass(className);
} catch (final NoClassDefFoundError ignored) {
return initializeClass(className);
} catch (final SecurityException ignored) {
return initializeClass(className);
}
return clazz;
}
private Class<?> initializeClass(final String className) {
try {
return Loader.initializeClass(className, this.getClass().getClassLoader());
} catch (final ClassNotFoundException ignore) {
return null;
} catch (final NoClassDefFoundError ignore) {
return null;
} catch (final SecurityException ignore) {
return null;
}
}
/**
* Construct the CacheEntry from the Class's information.
*
* @param stackTraceElement
* The stack trace element
* @param callerClass
* The Class.
* @param exact
* True if the class was obtained via Reflection.getCallerClass.
*
* @return The CacheEntry.
*/
private CacheEntry toCacheEntry(final StackTraceElement stackTraceElement, final Class<?> callerClass,
final boolean exact) {
String location = "?";
String version = "?";
ClassLoader lastLoader = null;
if (callerClass != null) {
try {
final CodeSource source = callerClass.getProtectionDomain().getCodeSource();
if (source != null) {
final URL locationURL = source.getLocation();
if (locationURL != null) {
final String str = locationURL.toString().replace('\\', '/');
int index = str.lastIndexOf("/");
if (index >= 0 && index == str.length() - 1) {
index = str.lastIndexOf("/", index - 1);
location = str.substring(index + 1);
} else {
location = str.substring(index + 1);
}
}
}
} catch (final Exception ex) {
// Ignore the exception.
}
final Package pkg = callerClass.getPackage();
if (pkg != null) {
final String ver = pkg.getImplementationVersion();
if (ver != null) {
version = ver;
}
}
lastLoader = callerClass.getClassLoader();
}
return new CacheEntry(new ExtendedClassInfo(exact, location, version), lastLoader);
}
/**
* Resolve all the stack entries in this stack trace that are not common with the parent.
*
* @param stack
* The callers Class stack.
* @param map
* The cache of CacheEntry objects.
* @param rootTrace
* The first stack trace resolve or null.
* @param stackTrace
* The stack trace being resolved.
* @return The StackTracePackageElement array.
*/
ExtendedStackTraceElement[] toExtendedStackTrace(final Stack<Class<?>> stack, final Map<String, CacheEntry> map,
final StackTraceElement[] rootTrace, final StackTraceElement[] stackTrace) {
int stackLength;
if (rootTrace != null) {
int rootIndex = rootTrace.length - 1;
int stackIndex = stackTrace.length - 1;
while (rootIndex >= 0 && stackIndex >= 0 && rootTrace[rootIndex].equals(stackTrace[stackIndex])) {
--rootIndex;
--stackIndex;
}
this.commonElementCount = stackTrace.length - 1 - stackIndex;
stackLength = stackIndex + 1;
} else {
this.commonElementCount = 0;
stackLength = stackTrace.length;
}
final ExtendedStackTraceElement[] extStackTrace = new ExtendedStackTraceElement[stackLength];
Class<?> clazz = stack.isEmpty() ? null : stack.peek();
ClassLoader lastLoader = null;
for (int i = stackLength - 1; i >= 0; --i) {
final StackTraceElement stackTraceElement = stackTrace[i];
final String className = stackTraceElement.getClassName();
// The stack returned from getCurrentStack may be missing entries for java.lang.reflect.Method.invoke()
// and its implementation. The Throwable might also contain stack entries that are no longer
// present as those methods have returned.
ExtendedClassInfo extClassInfo;
if (clazz != null && className.equals(clazz.getName())) {
final CacheEntry entry = this.toCacheEntry(stackTraceElement, clazz, true);
extClassInfo = entry.element;
lastLoader = entry.loader;
stack.pop();
clazz = stack.isEmpty() ? null : stack.peek();
} else {
final CacheEntry cacheEntry = map.get(className);
if (cacheEntry != null) {
final CacheEntry entry = cacheEntry;
extClassInfo = entry.element;
if (entry.loader != null) {
lastLoader = entry.loader;
}
} else {
final CacheEntry entry = this.toCacheEntry(stackTraceElement,
this.loadClass(lastLoader, className), false);
extClassInfo = entry.element;
map.put(stackTraceElement.toString(), entry);
if (entry.loader != null) {
lastLoader = entry.loader;
}
}
}
extStackTrace[i] = new ExtendedStackTraceElement(stackTraceElement, extClassInfo);
}
return extStackTrace;
}
@Override
public String toString() {
final String msg = this.message;
return msg != null ? this.name + ": " + msg : this.name;
}
private ThrowableProxy[] toSuppressedProxies(final Throwable thrown, Set<Throwable> suppressedVisited) {
try {
final Throwable[] suppressed = thrown.getSuppressed();
if (suppressed == null) {
return EMPTY_THROWABLE_PROXY_ARRAY;
}
final List<ThrowableProxy> proxies = new ArrayList<>(suppressed.length);
if (suppressedVisited == null) {
suppressedVisited = new HashSet<>(proxies.size());
}
for (int i = 0; i < suppressed.length; i++) {
final Throwable candidate = suppressed[i];
if (!suppressedVisited.contains(candidate)) {
suppressedVisited.add(candidate);
proxies.add(new ThrowableProxy(candidate, suppressedVisited));
}
}
return proxies.toArray(new ThrowableProxy[proxies.size()]);
} catch (final Exception e) {
StatusLogger.getLogger().error(e);
}
return null;
}
}
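Note: a brief usage sketch of the forked class above (the checkstyle suppression earlier in this commit describes it as a temporary copy of the Log4j 2 implementation). The exception messages are invented for the example.

import org.apache.logging.log4j.core.impl.ThrowableProxy;

public class ThrowableProxySketch {
    public static void main(String[] args) {
        Exception cause = new IllegalStateException("node disconnected");        // hypothetical failure
        Exception failure = new RuntimeException("shard recovery failed", cause);
        ThrowableProxy proxy = new ThrowableProxy(failure);
        // Each frame in the extended trace carries jar location and version information.
        System.out.println(proxy.getExtendedStackTraceAsString());
        System.out.println(proxy.getCauseProxy().getName()); // java.lang.IllegalStateException
    }
}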


@ -102,7 +102,6 @@ public class MapperQueryParser extends QueryParser {
setLowercaseExpandedTerms(settings.lowercaseExpandedTerms());
setPhraseSlop(settings.phraseSlop());
setDefaultOperator(settings.defaultOperator());
setFuzzyMinSim(settings.fuzziness().asFloat());
setFuzzyPrefixLength(settings.fuzzyPrefixLength());
setLocale(settings.locale());
}
@ -114,7 +113,7 @@ public class MapperQueryParser extends QueryParser {
@Override
Query handleBareFuzzy(String qfield, Token fuzzySlop, String termImage) throws ParseException {
if (fuzzySlop.image.length() == 1) {
return getFuzzyQuery(qfield, termImage, Float.toString(fuzzyMinSim));
return getFuzzyQuery(qfield, termImage, Float.toString(settings.fuzziness().asDistance(termImage)));
}
return getFuzzyQuery(qfield, termImage, fuzzySlop.image.substring(1));
}
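Note: the change above stops collapsing fuzziness to a single float at parser setup and instead resolves it per term. A rough sketch of why that matters for Fuzziness.AUTO; the concrete edit-distance thresholds are the commonly documented AUTO behaviour, an assumption rather than something taken from this diff.

import org.elasticsearch.common.unit.Fuzziness;

public class FuzzinessSketch {
    public static void main(String[] args) {
        Fuzziness auto = Fuzziness.AUTO;
        // AUTO is usually described as: 1-2 chars -> 0 edits, 3-5 -> 1 edit, 6+ -> 2 edits.
        System.out.println(auto.asDistance("ab"));     // expected 0
        System.out.println(auto.asDistance("abcd"));   // expected 1
        System.out.println(auto.asDistance("abcdef")); // expected 2
        // asFloat() cannot express this length dependence, which is what the removed code relied on.
    }
}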


@ -19,6 +19,7 @@
package org.elasticsearch;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.support.replication.ReplicationOperation;
import org.elasticsearch.cluster.action.shard.ShardStateAction;
import org.elasticsearch.common.io.stream.StreamInput;


@ -19,12 +19,12 @@
package org.elasticsearch;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexFormatTooNewException;
import org.apache.lucene.index.IndexFormatTooOldException;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.index.Index;
import org.elasticsearch.rest.RestStatus;
@ -39,7 +39,7 @@ import java.util.Set;
public final class ExceptionsHelper {
private static final ESLogger logger = Loggers.getLogger(ExceptionsHelper.class);
private static final Logger logger = Loggers.getLogger(ExceptionsHelper.class);
public static RuntimeException convertToRuntime(Exception e) {
if (e instanceof RuntimeException) {


@ -73,6 +73,8 @@ public class Version {
public static final Version V_2_3_4 = new Version(V_2_3_4_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
public static final int V_2_3_5_ID = 2030599;
public static final Version V_2_3_5 = new Version(V_2_3_5_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
public static final int V_2_4_0_ID = 2040099;
public static final Version V_2_4_0 = new Version(V_2_4_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_2);
public static final int V_5_0_0_alpha1_ID = 5000001;
public static final Version V_5_0_0_alpha1 = new Version(V_5_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
public static final int V_5_0_0_alpha2_ID = 5000002;
@ -110,6 +112,8 @@ public class Version {
return V_5_0_0_alpha2;
case V_5_0_0_alpha1_ID:
return V_5_0_0_alpha1;
case V_2_4_0_ID:
return V_2_4_0;
case V_2_3_5_ID:
return V_2_3_5;
case V_2_3_4_ID:
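Note: the id constants above appear to encode major/minor/revision plus a two-digit build (99 for releases, 01 for alpha1). That reading is an inference from the values in this hunk, checked quickly below.

public class VersionIdSketch {
    public static void main(String[] args) {
        // Assumed scheme: major * 1_000_000 + minor * 10_000 + revision * 100 + build
        System.out.println(2 * 1_000_000 + 4 * 10_000 + 0 * 100 + 99); // 2040099 = V_2_4_0_ID
        System.out.println(2 * 1_000_000 + 3 * 10_000 + 5 * 100 + 99); // 2030599 = V_2_3_5_ID
        System.out.println(5 * 1_000_000 + 0 * 10_000 + 0 * 100 +  1); // 5000001 = V_5_0_0_alpha1_ID
    }
}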


@ -20,7 +20,6 @@
package org.elasticsearch.action;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
@ -335,7 +334,7 @@ public class ActionModule extends AbstractModule {
this.actionPlugins = actionPlugins;
actions = setupActions(actionPlugins);
actionFilters = setupActionFilters(actionPlugins, ingestEnabled);
autoCreateIndex = transportClient ? null : new AutoCreateIndex(settings, resolver);
autoCreateIndex = transportClient ? null : new AutoCreateIndex(settings, clusterSettings, resolver);
destructiveOperations = new DestructiveOperations(settings, clusterSettings);
Set<String> headers = actionPlugins.stream().flatMap(p -> p.getRestHeaders().stream()).collect(Collectors.toSet());
restController = new RestController(settings, headers);


@ -19,6 +19,8 @@
package org.elasticsearch.action.admin.cluster.health;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.ActiveShardCount;
@ -106,7 +108,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
@Override
public void onFailure(String source, Exception e) {
logger.error("unexpected failure during [{}]", e, source);
logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
listener.onFailure(e);
}
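Note: this is the recurring logging change in this commit. The old ESLogger call took the exception in the middle of the varargs, while the Log4j 2 API takes a lazily evaluated message Supplier with the Throwable last. A minimal sketch, assuming a Log4j 2 Logger named logger:

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

public class SupplierLoggingSketch {
    static void onFailure(Logger logger, String source, Exception e) {
        // The cast selects the error(Supplier<?>, Throwable) overload; the message is only
        // built if the error level is actually enabled.
        logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
    }
}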


@ -37,10 +37,6 @@ import org.elasticsearch.threadpool.ThreadPoolInfo;
import org.elasticsearch.transport.TransportInfo;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import static java.util.Collections.unmodifiableMap;
/**
* Node information (static, does not change over time).
@ -85,8 +81,8 @@ public class NodeInfo extends BaseNodeResponse {
public NodeInfo(Version version, Build build, DiscoveryNode node, @Nullable Settings settings,
@Nullable OsInfo os, @Nullable ProcessInfo process, @Nullable JvmInfo jvm, @Nullable ThreadPoolInfo threadPool,
@Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsAndModules plugins, @Nullable IngestInfo ingest,
@Nullable ByteSizeValue totalIndexingBuffer) {
@Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsAndModules plugins,
@Nullable IngestInfo ingest, @Nullable ByteSizeValue totalIndexingBuffer) {
super(node);
this.version = version;
this.build = build;
@ -205,31 +201,14 @@ public class NodeInfo extends BaseNodeResponse {
if (in.readBoolean()) {
settings = Settings.readSettingsFromStream(in);
}
if (in.readBoolean()) {
os = OsInfo.readOsInfo(in);
}
if (in.readBoolean()) {
process = ProcessInfo.readProcessInfo(in);
}
if (in.readBoolean()) {
jvm = JvmInfo.readJvmInfo(in);
}
if (in.readBoolean()) {
threadPool = ThreadPoolInfo.readThreadPoolInfo(in);
}
if (in.readBoolean()) {
transport = TransportInfo.readTransportInfo(in);
}
if (in.readBoolean()) {
http = HttpInfo.readHttpInfo(in);
}
if (in.readBoolean()) {
plugins = new PluginsAndModules();
plugins.readFrom(in);
}
if (in.readBoolean()) {
ingest = new IngestInfo(in);
}
os = in.readOptionalWriteable(OsInfo::new);
process = in.readOptionalWriteable(ProcessInfo::new);
jvm = in.readOptionalWriteable(JvmInfo::new);
threadPool = in.readOptionalWriteable(ThreadPoolInfo::new);
transport = in.readOptionalWriteable(TransportInfo::new);
http = in.readOptionalWriteable(HttpInfo::new);
plugins = in.readOptionalWriteable(PluginsAndModules::new);
ingest = in.readOptionalWriteable(IngestInfo::new);
}
@Override
@ -249,53 +228,13 @@ public class NodeInfo extends BaseNodeResponse {
out.writeBoolean(true);
Settings.writeSettingsToStream(settings, out);
}
if (os == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
os.writeTo(out);
}
if (process == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
process.writeTo(out);
}
if (jvm == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
jvm.writeTo(out);
}
if (threadPool == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
threadPool.writeTo(out);
}
if (transport == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
transport.writeTo(out);
}
if (http == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
http.writeTo(out);
}
if (plugins == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
plugins.writeTo(out);
}
if (ingest == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
ingest.writeTo(out);
}
out.writeOptionalWriteable(os);
out.writeOptionalWriteable(process);
out.writeOptionalWriteable(jvm);
out.writeOptionalWriteable(threadPool);
out.writeOptionalWriteable(transport);
out.writeOptionalWriteable(http);
out.writeOptionalWriteable(plugins);
out.writeOptionalWriteable(ingest);
}
}
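Note: for readers unfamiliar with the stream helpers used above, a hedged sketch of what they do (simplified; the real implementations live on StreamInput/StreamOutput). It shows why the refactoring preserves the wire format of the old hand-rolled presence flags.

import java.io.IOException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

public class OptionalWriteableSketch {
    static void writeOptional(StreamOutput out, Writeable value) throws IOException {
        if (value == null) {
            out.writeBoolean(false);         // same flag the old NodeInfo code wrote by hand
        } else {
            out.writeBoolean(true);
            value.writeTo(out);
        }
    }

    static <T> T readOptional(StreamInput in, Writeable.Reader<T> reader) throws IOException {
        return in.readBoolean() ? reader.read(in) : null;   // e.g. reader = OsInfo::new
    }
}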


@ -21,7 +21,7 @@ package org.elasticsearch.action.admin.cluster.node.info;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.plugins.PluginInfo;
@ -34,13 +34,24 @@ import java.util.List;
/**
* Information about plugins and modules
*/
public class PluginsAndModules implements Streamable, ToXContent {
private List<PluginInfo> plugins;
private List<PluginInfo> modules;
public class PluginsAndModules implements Writeable, ToXContent {
private final List<PluginInfo> plugins;
private final List<PluginInfo> modules;
public PluginsAndModules() {
plugins = new ArrayList<>();
modules = new ArrayList<>();
public PluginsAndModules(List<PluginInfo> plugins, List<PluginInfo> modules) {
this.plugins = Collections.unmodifiableList(plugins);
this.modules = Collections.unmodifiableList(modules);
}
public PluginsAndModules(StreamInput in) throws IOException {
this.plugins = Collections.unmodifiableList(in.readList(PluginInfo::new));
this.modules = Collections.unmodifiableList(in.readList(PluginInfo::new));
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeList(plugins);
out.writeList(modules);
}
/**
@ -69,33 +80,6 @@ public class PluginsAndModules implements Streamable, ToXContent {
modules.add(info);
}
@Override
public void readFrom(StreamInput in) throws IOException {
if (plugins.isEmpty() == false || modules.isEmpty() == false) {
throw new IllegalStateException("instance is already populated");
}
int plugins_size = in.readInt();
for (int i = 0; i < plugins_size; i++) {
plugins.add(PluginInfo.readFromStream(in));
}
int modules_size = in.readInt();
for (int i = 0; i < modules_size; i++) {
modules.add(PluginInfo.readFromStream(in));
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeInt(plugins.size());
for (PluginInfo plugin : getPluginInfos()) {
plugin.writeTo(out);
}
out.writeInt(modules.size());
for (PluginInfo module : getModuleInfos()) {
module.writeTo(out);
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startArray("plugins");


@ -212,7 +212,7 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
indices = NodeIndicesStats.readIndicesStats(in);
}
if (in.readBoolean()) {
os = OsStats.readOsStats(in);
os = new OsStats(in);
}
if (in.readBoolean()) {
process = ProcessStats.readProcessStats(in);


@ -19,6 +19,9 @@
package org.elasticsearch.action.admin.cluster.reroute;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@ -33,7 +36,6 @@ import org.elasticsearch.cluster.routing.allocation.RoutingExplanations;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@ -77,13 +79,13 @@ public class TransportClusterRerouteAction extends TransportMasterNodeAction<Clu
private final ClusterRerouteRequest request;
private final ActionListener<ClusterRerouteResponse> listener;
private final ESLogger logger;
private final Logger logger;
private final AllocationService allocationService;
private volatile ClusterState clusterStateToSend;
private volatile RoutingExplanations explanations;
ClusterRerouteResponseAckedClusterStateUpdateTask(ESLogger logger, AllocationService allocationService, ClusterRerouteRequest request,
ActionListener<ClusterRerouteResponse> listener) {
ClusterRerouteResponseAckedClusterStateUpdateTask(Logger logger, AllocationService allocationService, ClusterRerouteRequest request,
ActionListener<ClusterRerouteResponse> listener) {
super(Priority.IMMEDIATE, request, listener);
this.request = request;
this.listener = listener;
@ -103,7 +105,7 @@ public class TransportClusterRerouteAction extends TransportMasterNodeAction<Clu
@Override
public void onFailure(String source, Exception e) {
logger.debug("failed to perform [{}]", e, source);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to perform [{}]", source), e);
super.onFailure(source, e);
}
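
The change above is the pattern repeated throughout this commit: ESLogger took the exception as the second argument, while the Log4j 2 Logger takes a lazily evaluated message Supplier plus the exception as the last argument. A minimal sketch of the before/after, assuming only the Log4j 2 API already imported above:

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

final class LoggingMigrationSketch {
    // Old ESLogger style (exception as the second argument):
    //     logger.debug("failed to perform [{}]", e, source);
    // New Log4j 2 style: the message is only built if debug is enabled,
    // and the exception is passed last.
    static void logFailure(Logger logger, String source, Exception e) {
        logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to perform [{}]", source), e);
    }
}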

View File

@ -19,6 +19,8 @@
package org.elasticsearch.action.admin.cluster.settings;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
@ -148,7 +150,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
@Override
public void onFailure(String source, Exception e) {
//if the reroute fails we only log
logger.debug("failed to perform [{}]", e, source);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to perform [{}]", source), e);
listener.onFailure(new ElasticsearchException("reroute after update settings failed", e));
}
@ -166,7 +168,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
@Override
public void onFailure(String source, Exception e) {
logger.debug("failed to perform [{}]", e, source);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to perform [{}]", source), e);
super.onFailure(source, e);
}

View File

@ -87,7 +87,7 @@ public class ClusterStatsNodes implements ToXContent {
}
}
this.counts = new Counts(nodeInfos);
this.os = new OsStats(nodeInfos);
this.os = new OsStats(nodeInfos, nodeStats);
this.process = new ProcessStats(nodeStats);
this.jvm = new JvmStats(nodeInfos, nodeStats);
this.networkTypes = new NetworkTypes(nodeInfos);
@ -226,11 +226,12 @@ public class ClusterStatsNodes implements ToXContent {
final int availableProcessors;
final int allocatedProcessors;
final ObjectIntHashMap<String> names;
final org.elasticsearch.monitor.os.OsStats.Mem mem;
/**
* Build the stats from information about each node.
*/
private OsStats(List<NodeInfo> nodeInfos) {
private OsStats(List<NodeInfo> nodeInfos, List<NodeStats> nodeStatsList) {
this.names = new ObjectIntHashMap<>();
int availableProcessors = 0;
int allocatedProcessors = 0;
@ -244,6 +245,22 @@ public class ClusterStatsNodes implements ToXContent {
}
this.availableProcessors = availableProcessors;
this.allocatedProcessors = allocatedProcessors;
long totalMemory = 0;
long freeMemory = 0;
for (NodeStats nodeStats : nodeStatsList) {
if (nodeStats.getOs() != null) {
long total = nodeStats.getOs().getMem().getTotal().bytes();
if (total > 0) {
totalMemory += total;
}
long free = nodeStats.getOs().getMem().getFree().bytes();
if (free > 0) {
freeMemory += free;
}
}
}
this.mem = new org.elasticsearch.monitor.os.OsStats.Mem(totalMemory, freeMemory);
}
public int getAvailableProcessors() {
@ -254,6 +271,10 @@ public class ClusterStatsNodes implements ToXContent {
return allocatedProcessors;
}
public org.elasticsearch.monitor.os.OsStats.Mem getMem() {
return mem;
}
static final class Fields {
static final String AVAILABLE_PROCESSORS = "available_processors";
static final String ALLOCATED_PROCESSORS = "allocated_processors";
@ -274,6 +295,7 @@ public class ClusterStatsNodes implements ToXContent {
builder.endObject();
}
builder.endArray();
mem.toXContent(builder, params);
return builder;
}
}

View File

@ -92,7 +92,7 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta
@Override
protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeRequest) {
NodeInfo nodeInfo = nodeService.info(true, true, false, true, false, true, false, true, false, false);
NodeStats nodeStats = nodeService.stats(CommonStatsFlags.NONE, false, true, true, false, true, false, false, false, false, false, false);
NodeStats nodeStats = nodeService.stats(CommonStatsFlags.NONE, true, true, true, false, true, false, false, false, false, false, false);
List<ShardStats> shardsStats = new ArrayList<>();
for (IndexService indexService : indicesService) {
for (IndexShard indexShard : indexService) {

View File

@ -19,6 +19,8 @@
package org.elasticsearch.action.admin.indices.close;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.DestructiveOperations;
@ -108,7 +110,7 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIn
@Override
public void onFailure(Exception t) {
logger.debug("failed to close indices [{}]", t, (Object)concreteIndices);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to close indices [{}]", (Object) concreteIndices), t);
listener.onFailure(t);
}
});

View File

@ -19,6 +19,8 @@
package org.elasticsearch.action.admin.indices.delete;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.DestructiveOperations;
@ -100,7 +102,7 @@ public class TransportDeleteIndexAction extends TransportMasterNodeAction<Delete
@Override
public void onFailure(Exception t) {
logger.debug("failed to delete indices [{}]", t, concreteIndices);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to delete indices [{}]", concreteIndices), t);
listener.onFailure(t);
}
});

View File

@ -19,6 +19,8 @@
package org.elasticsearch.action.admin.indices.mapping.put;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@ -92,12 +94,12 @@ public class TransportPutMappingAction extends TransportMasterNodeAction<PutMapp
@Override
public void onFailure(Exception t) {
logger.debug("failed to put mappings on indices [{}], type [{}]", t, concreteIndices, request.type());
logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", concreteIndices, request.type()), t);
listener.onFailure(t);
}
});
} catch (IndexNotFoundException ex) {
logger.debug("failed to put mappings on indices [{}], type [{}]", ex, request.indices(), request.type());
logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", request.indices(), request.type()), ex);
throw ex;
}
}

View File

@ -19,6 +19,8 @@
package org.elasticsearch.action.admin.indices.open;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.DestructiveOperations;
@ -93,7 +95,7 @@ public class TransportOpenIndexAction extends TransportMasterNodeAction<OpenInde
@Override
public void onFailure(Exception t) {
logger.debug("failed to open indices [{}]", t, (Object)concreteIndices);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to open indices [{}]", (Object) concreteIndices), t);
listener.onFailure(t);
}
});

View File

@ -19,6 +19,8 @@
package org.elasticsearch.action.admin.indices.settings.put;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@ -92,7 +94,7 @@ public class TransportUpdateSettingsAction extends TransportMasterNodeAction<Upd
@Override
public void onFailure(Exception t) {
logger.debug("failed to update settings on indices [{}]", t, (Object)concreteIndices);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to update settings on indices [{}]", (Object) concreteIndices), t);
listener.onFailure(t);
}
});

View File

@ -18,6 +18,7 @@
*/
package org.elasticsearch.action.admin.indices.shards;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException;
@ -41,7 +42,6 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.collect.ImmutableOpenIntMap;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.CountDown;
import org.elasticsearch.gateway.AsyncShardFetch;
@ -150,7 +150,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
private class InternalAsyncFetch extends AsyncShardFetch<NodeGatewayStartedShards> {
InternalAsyncFetch(ESLogger logger, String type, ShardId shardId, TransportNodesListGatewayStartedShards action) {
InternalAsyncFetch(Logger logger, String type, ShardId shardId, TransportNodesListGatewayStartedShards action) {
super(logger, type, shardId, action);
}

View File

@ -18,6 +18,8 @@
*/
package org.elasticsearch.action.admin.indices.template.delete;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@ -73,7 +75,7 @@ public class TransportDeleteIndexTemplateAction extends TransportMasterNodeActio
@Override
public void onFailure(Exception e) {
logger.debug("failed to delete templates [{}]", e, request.name());
logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to delete templates [{}]", request.name()), e);
listener.onFailure(e);
}
});

View File

@ -18,6 +18,8 @@
*/
package org.elasticsearch.action.admin.indices.template.put;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@ -94,7 +96,7 @@ public class TransportPutIndexTemplateAction extends TransportMasterNodeAction<P
@Override
public void onFailure(Exception e) {
logger.debug("failed to put template [{}]", e, request.name());
logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to put template [{}]", request.name()), e);
listener.onFailure(e);
}
});

View File

@ -19,6 +19,8 @@
package org.elasticsearch.action.admin.indices.upgrade.post;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@ -79,7 +81,7 @@ public class TransportUpgradeSettingsAction extends TransportMasterNodeAction<Up
@Override
public void onFailure(Exception t) {
logger.debug("failed to upgrade minimum compatibility version settings on indices [{}]", t, request.versions().keySet());
logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to upgrade minimum compatibility version settings on indices [{}]", request.versions().keySet()), t);
listener.onFailure(t);
}
});

View File

@ -18,9 +18,11 @@
*/
package org.elasticsearch.action.bulk;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
@ -31,7 +33,7 @@ import java.util.concurrent.TimeUnit;
* Abstracts the low-level details of bulk request handling
*/
abstract class BulkRequestHandler {
protected final ESLogger logger;
protected final Logger logger;
protected final Client client;
protected BulkRequestHandler(Client client) {
@ -76,12 +78,12 @@ abstract class BulkRequestHandler {
listener.afterBulk(executionId, bulkRequest, bulkResponse);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
logger.info("Bulk request {} has been cancelled.", e, executionId);
logger.info((Supplier<?>) () -> new ParameterizedMessage("Bulk request {} has been cancelled.", executionId), e);
if (!afterCalled) {
listener.afterBulk(executionId, bulkRequest, e);
}
} catch (Exception e) {
logger.warn("Failed to execute bulk request {}.", e, executionId);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("Failed to execute bulk request {}.", executionId), e);
if (!afterCalled) {
listener.afterBulk(executionId, bulkRequest, e);
}
@ -142,10 +144,10 @@ abstract class BulkRequestHandler {
bulkRequestSetupSuccessful = true;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
logger.info("Bulk request {} has been cancelled.", e, executionId);
logger.info((Supplier<?>) () -> new ParameterizedMessage("Bulk request {} has been cancelled.", executionId), e);
listener.afterBulk(executionId, bulkRequest, e);
} catch (Exception e) {
logger.warn("Failed to execute bulk request {}.", e, executionId);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("Failed to execute bulk request {}.", executionId), e);
listener.afterBulk(executionId, bulkRequest, e);
} finally {
if (!bulkRequestSetupSuccessful && acquired) { // if we fail on client.bulk() release the semaphore

View File

@ -20,6 +20,7 @@
package org.elasticsearch.action.bulk;
import org.elasticsearch.action.support.replication.ReplicatedWriteRequest;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.shard.ShardId;
@ -101,4 +102,15 @@ public class BulkShardRequest extends ReplicatedWriteRequest<BulkShardRequest> {
}
return b.toString();
}
@Override
public void onRetry() {
for (BulkItemRequest item : items) {
if (item.request() instanceof ReplicationRequest) {
// all replication requests need to be notified here as well, for example to make sure that internal
// optimizations are disabled; see IndexRequest#canHaveDuplicates()
((ReplicationRequest) item.request()).onRetry();
}
}
}
}
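
The new onRetry() override fans the retry notification out to every item in the bulk shard request so each wrapped request can disable its single-delivery optimizations. A minimal, self-contained model of that fan-out (hypothetical types, not part of this commit):

import java.util.List;

// A composite request forwards the retry notification to each item,
// mirroring BulkShardRequest.onRetry() above.
interface Retryable {
    void onRetry();
}

final class CompositeRetryable implements Retryable {
    private final List<Retryable> items;

    CompositeRetryable(List<Retryable> items) {
        this.items = items;
    }

    @Override
    public void onRetry() {
        for (Retryable item : items) {
            item.onRetry(); // e.g. an IndexRequest would flip its isRetry flag
        }
    }
}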

View File

@ -18,12 +18,12 @@
*/
package org.elasticsearch.action.bulk;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.FutureUtils;
@ -89,7 +89,7 @@ public class Retry {
}
static class AbstractRetryHandler implements ActionListener<BulkResponse> {
private final ESLogger logger;
private final Logger logger;
private final Client client;
private final ActionListener<BulkResponse> listener;
private final Iterator<TimeValue> backoff;

View File

@ -19,6 +19,8 @@
package org.elasticsearch.action.bulk;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionRequest;
@ -30,8 +32,8 @@ import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.index.TransportIndexAction;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.support.replication.TransportWriteAction;
import org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo;
import org.elasticsearch.action.support.replication.TransportWriteAction;
import org.elasticsearch.action.update.UpdateHelper;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateResponse;
@ -183,9 +185,9 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
private <ReplicationRequestT extends ReplicationRequest<ReplicationRequestT>> void logFailure(Throwable t, String operation, ShardId shardId, ReplicationRequest<ReplicationRequestT> request) {
if (ExceptionsHelper.status(t) == RestStatus.CONFLICT) {
logger.trace("{} failed to execute bulk item ({}) {}", t, shardId, operation, request);
logger.trace((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", shardId, operation, request), t);
} else {
logger.debug("{} failed to execute bulk item ({}) {}", t, shardId, operation, request);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", shardId, operation, request), t);
}
}

View File

@ -19,6 +19,8 @@
package org.elasticsearch.action.get;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.TransportActions;
@ -92,7 +94,7 @@ public class TransportShardMultiGetAction extends TransportSingleShardAction<Mul
if (TransportActions.isShardNotAvailableException(e)) {
throw (ElasticsearchException) e;
} else {
logger.debug("{} failed to execute multi_get for [{}]/[{}]", e, shardId, item.type(), item.id());
logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute multi_get for [{}]/[{}]", shardId, item.type(), item.id()), e);
response.add(request.locations.get(i), new MultiGetResponse.Failure(request.index(), item.type(), item.id(), e));
}
}

View File

@ -72,7 +72,7 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
/**
* Operation type controls the type of the index operation.
*/
public static enum OpType {
public enum OpType {
/**
* Index the source. If there is an existing document with the id, it will
* be replaced.
@ -152,6 +152,17 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
private String pipeline;
/**
* Value for {@link #getAutoGeneratedTimestamp()} if the document has an externally
* provided ID.
*/
public static final int UNSET_AUTO_GENERATED_TIMESTAMP = -1;
private long autoGeneratedTimestamp = UNSET_AUTO_GENERATED_TIMESTAMP;
private boolean isRetry = false;
public IndexRequest() {
}
@ -202,6 +213,10 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
}
}
if (opType() != OpType.INDEX && id == null) {
addValidationError("an id is required for a " + opType() + " operation", validationException);
}
if (!versionType.validateVersionForWrites(version)) {
validationException = addValidationError("illegal version value [" + version + "] for version type [" + versionType.name() + "]", validationException);
}
@ -216,6 +231,11 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
validationException = addValidationError("id is too long, must be no longer than 512 bytes but was: " +
id.getBytes(StandardCharsets.UTF_8).length, validationException);
}
if (id == null && (versionType == VersionType.INTERNAL && version == Versions.MATCH_ANY) == false) {
validationException = addValidationError("an id must be provided if version type or value are set", validationException);
}
return validationException;
}
@ -589,10 +609,10 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
}
// generate id if not already provided and id generation is allowed
if (allowIdGeneration) {
if (id == null) {
id(UUIDs.base64UUID());
}
if (allowIdGeneration && id == null) {
assert autoGeneratedTimestamp == -1;
autoGeneratedTimestamp = Math.max(0, System.currentTimeMillis()); // extra paranoia
id(UUIDs.base64UUID());
}
// generate timestamp if not provided, we always have one post this stage...
@ -639,6 +659,8 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
version = in.readLong();
versionType = VersionType.fromValue(in.readByte());
pipeline = in.readOptionalString();
isRetry = in.readBoolean();
autoGeneratedTimestamp = in.readLong();
}
@Override
@ -655,6 +677,8 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
out.writeLong(version);
out.writeByte(versionType.getValue());
out.writeOptionalString(pipeline);
out.writeBoolean(isRetry);
out.writeLong(autoGeneratedTimestamp);
}
@Override
@ -667,4 +691,25 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
}
return "index {[" + index + "][" + type + "][" + id + "], source[" + sSource + "]}";
}
/**
* Returns <code>true</code> if this request has been sent to a shard copy more than once.
*/
public boolean isRetry() {
return isRetry;
}
@Override
public void onRetry() {
isRetry = true;
}
/**
* Returns the timestamp at which the auto-generated ID was created, or {@value #UNSET_AUTO_GENERATED_TIMESTAMP} if the
* document has no auto-generated timestamp. This method returns a positive value iff the id was auto-generated.
*/
public long getAutoGeneratedTimestamp() {
return autoGeneratedTimestamp;
}
}
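
Together these additions let a shard tell whether the id of an incoming request was auto-generated and whether the request is a resend: autoGeneratedTimestamp is set when process(...) generates the id, isRetry is flipped by onRetry(), and both are serialized and, in later hunks of this commit, passed into prepareIndexOnPrimary/prepareIndexOnReplica. A small hedged sketch reading the two flags off a request (the helper class is hypothetical):

import org.elasticsearch.action.index.IndexRequest;

final class AutoIdProbe {
    // Positive timestamp iff the coordinating node auto-generated the id;
    // requests with a client-supplied id keep UNSET_AUTO_GENERATED_TIMESTAMP.
    static boolean idWasAutoGenerated(IndexRequest request) {
        return request.getAutoGeneratedTimestamp() != IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP;
    }

    // True only if the request has been sent to a shard copy more than once.
    static boolean isResend(IndexRequest request) {
        return request.isRetry();
    }
}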

View File

@ -205,15 +205,6 @@ public class IndexRequestBuilder extends ReplicationRequestBuilder<IndexRequest,
return this;
}
/**
* Sets a string representation of the {@link #setOpType(org.elasticsearch.action.index.IndexRequest.OpType)}. Can
* be either "index" or "create".
*/
public IndexRequestBuilder setOpType(String opType) {
request.opType(IndexRequest.OpType.fromString(opType));
return this;
}
/**
* Set to <tt>true</tt> to force this index to use {@link org.elasticsearch.action.index.IndexRequest.OpType#CREATE}.
*/

View File

@ -158,7 +158,7 @@ public class TransportIndexAction extends TransportWriteAction<IndexRequest, Ind
SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA, shardId.getIndexName(), request.type(), request.id(), request.source())
.routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl());
final Engine.Index operation = indexShard.prepareIndexOnReplica(sourceToParse, request.version(), request.versionType());
final Engine.Index operation = indexShard.prepareIndexOnReplica(sourceToParse, request.version(), request.versionType(), request.getAutoGeneratedTimestamp(), request.isRetry());
Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
if (update != null) {
throw new RetryOnReplicaException(shardId, "Mappings are not available on the replica yet, triggered update: " + update);
@ -171,7 +171,7 @@ public class TransportIndexAction extends TransportWriteAction<IndexRequest, Ind
public static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard indexShard) {
SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.PRIMARY, request.index(), request.type(), request.id(), request.source())
.routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl());
return indexShard.prepareIndexOnPrimary(sourceToParse, request.version(), request.versionType());
return indexShard.prepareIndexOnPrimary(sourceToParse, request.version(), request.versionType(), request.getAutoGeneratedTimestamp(), request.isRetry());
}
public static WriteResult<IndexResponse> executeIndexRequestOnPrimary(IndexRequest request, IndexShard indexShard,

View File

@ -19,6 +19,8 @@
package org.elasticsearch.action.ingest;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
@ -90,7 +92,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
void processIndexRequest(Task task, String action, ActionListener listener, ActionFilterChain chain, IndexRequest indexRequest) {
executionService.executeIndexRequest(indexRequest, t -> {
logger.error("failed to execute pipeline [{}]", t, indexRequest.getPipeline());
logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to execute pipeline [{}]", indexRequest.getPipeline()), t);
listener.onFailure(t);
}, success -> {
// TransportIndexAction uses IndexRequest and same action name on the node that receives the request and the node that
@ -105,7 +107,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
long ingestStartTimeInNanos = System.nanoTime();
BulkRequestModifier bulkRequestModifier = new BulkRequestModifier(original);
executionService.executeBulkRequest(() -> bulkRequestModifier, (indexRequest, exception) -> {
logger.debug("failed to execute pipeline [{}] for document [{}/{}/{}]", exception, indexRequest.getPipeline(), indexRequest.index(), indexRequest.type(), indexRequest.id());
logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to execute pipeline [{}] for document [{}/{}/{}]", indexRequest.getPipeline(), indexRequest.index(), indexRequest.type(), indexRequest.id()), exception);
bulkRequestModifier.markCurrentItemAsFailed(exception);
}, (exception) -> {
if (exception != null) {

View File

@ -20,8 +20,10 @@
package org.elasticsearch.action.search;
import com.carrotsearch.hppc.IntArrayList;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.NoShardAvailableActionException;
import org.elasticsearch.action.support.TransportActions;
@ -35,7 +37,6 @@ import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.SearchShardTarget;
@ -46,7 +47,6 @@ import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.query.QuerySearchResultProvider;
import org.elasticsearch.search.suggest.Suggest;
import org.elasticsearch.threadpool.ThreadPool;
import java.util.List;
@ -58,7 +58,7 @@ import static org.elasticsearch.action.search.TransportSearchHelper.internalSear
abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult> extends AbstractAsyncAction {
protected final ESLogger logger;
protected final Logger logger;
protected final SearchTransportService searchTransportService;
private final IndexNameExpressionResolver indexNameExpressionResolver;
protected final SearchPhaseController searchPhaseController;
@ -77,7 +77,7 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
private final Object shardFailuresMutex = new Object();
protected volatile ScoreDoc[] sortedShardDocs;
protected AbstractSearchAsyncAction(ESLogger logger, SearchTransportService searchTransportService, ClusterService clusterService,
protected AbstractSearchAsyncAction(Logger logger, SearchTransportService searchTransportService, ClusterService clusterService,
IndexNameExpressionResolver indexNameExpressionResolver,
SearchPhaseController searchPhaseController, ThreadPool threadPool, SearchRequest request,
ActionListener<SearchResponse> listener) {
@ -191,7 +191,12 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
innerMoveToSecondPhase();
} catch (Exception e) {
if (logger.isDebugEnabled()) {
logger.debug("{}: Failed to execute [{}] while moving to second phase", e, shardIt.shardId(), request);
logger.debug(
(Supplier<?>) () -> new ParameterizedMessage(
"{}: Failed to execute [{}] while moving to second phase",
shardIt.shardId(),
request),
e);
}
raiseEarlyFailure(new ReduceSearchPhaseException(firstPhaseName(), "", e, buildShardFailures()));
}
@ -211,15 +216,21 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
if (totalOps.incrementAndGet() == expectedTotalOps) {
if (logger.isDebugEnabled()) {
if (e != null && !TransportActions.isShardNotAvailableException(e)) {
logger.debug("{}: Failed to execute [{}]", e, shard != null ? shard.shortSummary() : shardIt.shardId(), request);
logger.debug(
(Supplier<?>) () -> new ParameterizedMessage(
"{}: Failed to execute [{}]",
shard != null ? shard.shortSummary() :
shardIt.shardId(),
request),
e);
} else if (logger.isTraceEnabled()) {
logger.trace("{}: Failed to execute [{}]", e, shard, request);
logger.trace((Supplier<?>) () -> new ParameterizedMessage("{}: Failed to execute [{}]", shard, request), e);
}
}
final ShardSearchFailure[] shardSearchFailures = buildShardFailures();
if (successfulOps.get() == 0) {
if (logger.isDebugEnabled()) {
logger.debug("All shards failed for phase: [{}]", e, firstPhaseName());
logger.debug((Supplier<?>) () -> new ParameterizedMessage("All shards failed for phase: [{}]", firstPhaseName()), e);
}
// no successful ops, raise an exception
@ -236,10 +247,13 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
final ShardRouting nextShard = shardIt.nextOrNull();
final boolean lastShard = nextShard == null;
// trace log this exception
if (logger.isTraceEnabled()) {
logger.trace("{}: Failed to execute [{}] lastShard [{}]", e, shard != null ? shard.shortSummary() : shardIt.shardId(),
request, lastShard);
}
logger.trace(
(Supplier<?>) () -> new ParameterizedMessage(
"{}: Failed to execute [{}] lastShard [{}]",
shard != null ? shard.shortSummary() : shardIt.shardId(),
request,
lastShard),
e);
if (!lastShard) {
try {
performFirstPhase(shardIndex, shardIt, nextShard);
@ -251,8 +265,14 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
// no more shards active, add a failure
if (logger.isDebugEnabled() && !logger.isTraceEnabled()) { // do not double log this exception
if (e != null && !TransportActions.isShardNotAvailableException(e)) {
logger.debug("{}: Failed to execute [{}] lastShard [{}]", e,
shard != null ? shard.shortSummary() : shardIt.shardId(), request, lastShard);
logger.debug(
(Supplier<?>) () -> new ParameterizedMessage(
"{}: Failed to execute [{}] lastShard [{}]",
shard != null ? shard.shortSummary() :
shardIt.shardId(),
request,
lastShard),
e);
}
}
}

View File

@ -19,12 +19,14 @@
package org.elasticsearch.action.search;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
@ -43,7 +45,7 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<DfsSea
private final AtomicArray<QueryFetchSearchResult> queryFetchResults;
SearchDfsQueryAndFetchAsyncAction(ESLogger logger, SearchTransportService searchTransportService,
SearchDfsQueryAndFetchAsyncAction(Logger logger, SearchTransportService searchTransportService,
ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
SearchPhaseController searchPhaseController, ThreadPool threadPool,
SearchRequest request, ActionListener<SearchResponse> listener) {
@ -105,7 +107,7 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<DfsSea
void onSecondPhaseFailure(Exception e, QuerySearchRequest querySearchRequest, int shardIndex, DfsSearchResult dfsResult,
AtomicInteger counter) {
if (logger.isDebugEnabled()) {
logger.debug("[{}] Failed to execute query phase", e, querySearchRequest.id());
logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute query phase", querySearchRequest.id()), e);
}
this.addShardFailure(shardIndex, dfsResult.shardTarget(), e);
successfulOps.decrementAndGet();

View File

@ -20,13 +20,15 @@
package org.elasticsearch.action.search;
import com.carrotsearch.hppc.IntArrayList;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.action.SearchTransportService;
@ -50,7 +52,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSe
final AtomicArray<FetchSearchResult> fetchResults;
final AtomicArray<IntArrayList> docIdsToLoad;
SearchDfsQueryThenFetchAsyncAction(ESLogger logger, SearchTransportService searchTransportService,
SearchDfsQueryThenFetchAsyncAction(Logger logger, SearchTransportService searchTransportService,
ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
SearchPhaseController searchPhaseController, ThreadPool threadPool,
SearchRequest request, ActionListener<SearchResponse> listener) {
@ -113,7 +115,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSe
void onQueryFailure(Exception e, QuerySearchRequest querySearchRequest, int shardIndex, DfsSearchResult dfsResult,
AtomicInteger counter) {
if (logger.isDebugEnabled()) {
logger.debug("[{}] Failed to execute query phase", e, querySearchRequest.id());
logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute query phase", querySearchRequest.id()), e);
}
this.addShardFailure(shardIndex, dfsResult.shardTarget(), e);
successfulOps.decrementAndGet();
@ -182,7 +184,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSe
void onFetchFailure(Exception e, ShardFetchSearchRequest fetchSearchRequest, int shardIndex,
SearchShardTarget shardTarget, AtomicInteger counter) {
if (logger.isDebugEnabled()) {
logger.debug("[{}] Failed to execute fetch phase", e, fetchSearchRequest.id());
logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute fetch phase", fetchSearchRequest.id()), e);
}
this.addShardFailure(shardIndex, shardTarget, e);
successfulOps.decrementAndGet();

View File

@ -19,12 +19,12 @@
package org.elasticsearch.action.search;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
@ -36,7 +36,7 @@ import java.io.IOException;
class SearchQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<QueryFetchSearchResult> {
SearchQueryAndFetchAsyncAction(ESLogger logger, SearchTransportService searchTransportService,
SearchQueryAndFetchAsyncAction(Logger logger, SearchTransportService searchTransportService,
ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
SearchPhaseController searchPhaseController, ThreadPool threadPool,
SearchRequest request, ActionListener<SearchResponse> listener) {

View File

@ -20,13 +20,15 @@
package org.elasticsearch.action.search;
import com.carrotsearch.hppc.IntArrayList;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.action.SearchTransportService;
@ -46,7 +48,7 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<QuerySea
final AtomicArray<FetchSearchResult> fetchResults;
final AtomicArray<IntArrayList> docIdsToLoad;
SearchQueryThenFetchAsyncAction(ESLogger logger, SearchTransportService searchService,
SearchQueryThenFetchAsyncAction(Logger logger, SearchTransportService searchService,
ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
SearchPhaseController searchPhaseController, ThreadPool threadPool,
SearchRequest request, ActionListener<SearchResponse> listener) {
@ -115,7 +117,7 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<QuerySea
void onFetchFailure(Exception e, ShardFetchSearchRequest fetchSearchRequest, int shardIndex, SearchShardTarget shardTarget,
AtomicInteger counter) {
if (logger.isDebugEnabled()) {
logger.debug("[{}] Failed to execute fetch phase", e, fetchSearchRequest.id());
logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute fetch phase", fetchSearchRequest.id()), e);
}
this.addShardFailure(shardIndex, shardTarget, e);
successfulOps.decrementAndGet();

View File

@ -19,12 +19,14 @@
package org.elasticsearch.action.search;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
@ -40,7 +42,7 @@ import static org.elasticsearch.action.search.TransportSearchHelper.internalScro
class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {
private final ESLogger logger;
private final Logger logger;
private final SearchPhaseController searchPhaseController;
private final SearchTransportService searchTransportService;
private final SearchScrollRequest request;
@ -52,7 +54,7 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {
private final AtomicInteger successfulOps;
private final AtomicInteger counter;
SearchScrollQueryAndFetchAsyncAction(ESLogger logger, ClusterService clusterService,
SearchScrollQueryAndFetchAsyncAction(Logger logger, ClusterService clusterService,
SearchTransportService searchTransportService, SearchPhaseController searchPhaseController,
SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
this.logger = logger;
@ -146,7 +148,7 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {
private void onPhaseFailure(Exception e, long searchId, int shardIndex) {
if (logger.isDebugEnabled()) {
logger.debug("[{}] Failed to execute query phase", e, searchId);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute query phase", searchId), e);
}
addShardFailure(shardIndex, new ShardSearchFailure(e));
successfulOps.decrementAndGet();

View File

@ -20,12 +20,14 @@
package org.elasticsearch.action.search;
import com.carrotsearch.hppc.IntArrayList;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
@ -43,7 +45,7 @@ import static org.elasticsearch.action.search.TransportSearchHelper.internalScro
class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
private final ESLogger logger;
private final Logger logger;
private final SearchTransportService searchTransportService;
private final SearchPhaseController searchPhaseController;
private final SearchScrollRequest request;
@ -56,7 +58,7 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
private volatile ScoreDoc[] sortedShardDocs;
private final AtomicInteger successfulOps;
SearchScrollQueryThenFetchAsyncAction(ESLogger logger, ClusterService clusterService,
SearchScrollQueryThenFetchAsyncAction(Logger logger, ClusterService clusterService,
SearchTransportService searchTransportService, SearchPhaseController searchPhaseController,
SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
this.logger = logger;
@ -146,7 +148,7 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
void onQueryPhaseFailure(final int shardIndex, final AtomicInteger counter, final long searchId, Exception failure) {
if (logger.isDebugEnabled()) {
logger.debug("[{}] Failed to execute query phase", failure, searchId);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute query phase", searchId), failure);
}
addShardFailure(shardIndex, new ShardSearchFailure(failure));
successfulOps.decrementAndGet();

View File

@ -19,6 +19,8 @@
package org.elasticsearch.action.search;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
@ -144,7 +146,7 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScro
}
void onFailedFreedContext(Throwable e, DiscoveryNode node) {
logger.warn("Clear SC failed on node[{}]", e, node);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("Clear SC failed on node[{}]", node), e);
if (expectedOps.countDown()) {
listener.onResponse(new ClearScrollResponse(false, numberOfFreedSearchContexts.get()));
} else {

View File

@ -19,9 +19,9 @@
package org.elasticsearch.action.support;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ListenableActionFuture;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.threadpool.ThreadPool;
@ -33,7 +33,7 @@ import java.util.List;
*/
public abstract class AbstractListenableActionFuture<T, L> extends AdapterActionFuture<T, L> implements ListenableActionFuture<T> {
private static final ESLogger logger = Loggers.getLogger(AbstractListenableActionFuture.class);
private static final Logger logger = Loggers.getLogger(AbstractListenableActionFuture.class);
final ThreadPool threadPool;
volatile Object listeners;

View File

@ -24,8 +24,8 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
@ -41,16 +41,17 @@ import java.util.List;
public final class AutoCreateIndex {
public static final Setting<AutoCreate> AUTO_CREATE_INDEX_SETTING =
new Setting<>("action.auto_create_index", "true", AutoCreate::new, Property.NodeScope);
new Setting<>("action.auto_create_index", "true", AutoCreate::new, Property.NodeScope, Setting.Property.Dynamic);
private final boolean dynamicMappingDisabled;
private final IndexNameExpressionResolver resolver;
private final AutoCreate autoCreate;
private volatile AutoCreate autoCreate;
public AutoCreateIndex(Settings settings, IndexNameExpressionResolver resolver) {
public AutoCreateIndex(Settings settings, ClusterSettings clusterSettings, IndexNameExpressionResolver resolver) {
this.resolver = resolver;
dynamicMappingDisabled = !MapperService.INDEX_MAPPER_DYNAMIC_SETTING.get(settings);
this.autoCreate = AUTO_CREATE_INDEX_SETTING.get(settings);
clusterSettings.addSettingsUpdateConsumer(AUTO_CREATE_INDEX_SETTING, this::setAutoCreate);
}
/**
@ -64,6 +65,8 @@ public final class AutoCreateIndex {
* Should the index be auto created?
*/
public boolean shouldAutoCreate(String index, ClusterState state) {
// One volatile read, so that all checks are done against the same instance:
final AutoCreate autoCreate = this.autoCreate;
if (autoCreate.autoCreateIndex == false) {
return false;
}
@ -87,7 +90,15 @@ public final class AutoCreateIndex {
return false;
}
private static class AutoCreate {
AutoCreate getAutoCreate() {
return autoCreate;
}
void setAutoCreate(AutoCreate autoCreate) {
this.autoCreate = autoCreate;
}
static class AutoCreate {
private final boolean autoCreateIndex;
private final List<Tuple<String, Boolean>> expressions;
@ -128,5 +139,13 @@ public final class AutoCreateIndex {
this.expressions = expressions;
this.autoCreateIndex = autoCreateIndex;
}
boolean isAutoCreateIndex() {
return autoCreateIndex;
}
List<Tuple<String, Boolean>> getExpressions() {
return expressions;
}
}
}
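
action.auto_create_index becomes updatable at runtime: the setting gains Property.Dynamic, the cached AutoCreate value turns volatile, and the constructor registers an update consumer with ClusterSettings. A minimal sketch of the same pattern with a hypothetical boolean setting (assuming the setting is registered with the node's ClusterSettings):

import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;

// Hypothetical component mirroring the AutoCreateIndex change above.
class ExampleDynamicComponent {
    static final Setting<Boolean> EXAMPLE_ENABLED_SETTING =
            Setting.boolSetting("example.enabled", true, Property.NodeScope, Property.Dynamic);

    private volatile boolean enabled;

    ExampleDynamicComponent(Settings settings, ClusterSettings clusterSettings) {
        this.enabled = EXAMPLE_ENABLED_SETTING.get(settings);
        // settings updates applied through the cluster settings API now reach
        // this instance without a node restart
        clusterSettings.addSettingsUpdateConsumer(EXAMPLE_ENABLED_SETTING, this::setEnabled);
    }

    private void setEnabled(boolean enabled) {
        this.enabled = enabled;
    }
}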

View File

@ -18,6 +18,7 @@
*/
package org.elasticsearch.action.support;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
@ -75,8 +76,13 @@ public abstract class HandledTransportAction<Request extends ActionRequest<Reque
try {
channel.sendResponse(e);
} catch (Exception e1) {
logger.warn("Failed to send error response for action [{}] and request [{}]", e1,
actionName, request);
logger.warn(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage(
"Failed to send error response for action [{}] and request [{}]",
actionName,
request),
e1);
}
}
});

View File

@ -19,10 +19,12 @@
package org.elasticsearch.action.support;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.threadpool.ThreadPool;
@ -39,12 +41,12 @@ public final class ThreadedActionListener<Response> implements ActionListener<Re
*/
public static class Wrapper {
private final ESLogger logger;
private final Logger logger;
private final ThreadPool threadPool;
private final boolean threadedListener;
public Wrapper(ESLogger logger, Settings settings, ThreadPool threadPool) {
public Wrapper(Logger logger, Settings settings, ThreadPool threadPool) {
this.logger = logger;
this.threadPool = threadPool;
// Should the action listener be threaded or not by default. Action listeners are automatically threaded for
@ -68,13 +70,13 @@ public final class ThreadedActionListener<Response> implements ActionListener<Re
}
}
private final ESLogger logger;
private final Logger logger;
private final ThreadPool threadPool;
private final String executor;
private final ActionListener<Response> listener;
private final boolean forceExecution;
public ThreadedActionListener(ESLogger logger, ThreadPool threadPool, String executor, ActionListener<Response> listener,
public ThreadedActionListener(Logger logger, ThreadPool threadPool, String executor, ActionListener<Response> listener,
boolean forceExecution) {
this.logger = logger;
this.threadPool = threadPool;
@ -118,7 +120,8 @@ public final class ThreadedActionListener<Response> implements ActionListener<Re
@Override
public void onFailure(Exception e) {
logger.warn("failed to execute failure callback on [{}], failure [{}]", e, listener, e);
logger.warn(
(Supplier<?>) () -> new ParameterizedMessage("failed to execute failure callback on [{}]", listener), e);
}
});
}

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.support;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
@ -27,7 +28,6 @@ import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskListener;
@ -165,9 +165,9 @@ public abstract class TransportAction<Request extends ActionRequest<Request>, Re
private final TransportAction<Request, Response> action;
private final AtomicInteger index = new AtomicInteger();
private final ESLogger logger;
private final Logger logger;
private RequestFilterChain(TransportAction<Request, Response> action, ESLogger logger) {
private RequestFilterChain(TransportAction<Request, Response> action, Logger logger) {
this.action = action;
this.logger = logger;
}
@ -201,9 +201,9 @@ public abstract class TransportAction<Request extends ActionRequest<Request>, Re
private final ActionFilter[] filters;
private final AtomicInteger index;
private final ESLogger logger;
private final Logger logger;
private ResponseFilterChain(ActionFilter[] filters, ESLogger logger) {
private ResponseFilterChain(ActionFilter[] filters, Logger logger) {
this.filters = filters;
this.index = new AtomicInteger(filters.length);
this.logger = logger;

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.support.broadcast;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.NoShardAvailableActionException;
import org.elasticsearch.action.support.ActionFilters;
@ -37,10 +38,10 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;
import java.util.concurrent.atomic.AtomicInteger;
@ -224,7 +225,13 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest<
if (e != null) {
if (logger.isTraceEnabled()) {
if (!TransportActions.isShardNotAvailableException(e)) {
logger.trace("{}: failed to execute [{}]", e, shard != null ? shard.shortSummary() : shardIt.shardId(), request);
logger.trace(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage(
"{}: failed to execute [{}]",
shard != null ? shard.shortSummary() : shardIt.shardId(),
request),
e);
}
}
}
@ -233,7 +240,13 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest<
if (logger.isDebugEnabled()) {
if (e != null) {
if (!TransportActions.isShardNotAvailableException(e)) {
logger.debug("{}: failed to execute [{}]", e, shard != null ? shard.shortSummary() : shardIt.shardId(), request);
logger.debug(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage(
"{}: failed to execute [{}]",
shard != null ? shard.shortSummary() : shardIt.shardId(),
request),
e);
}
}
}

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.support.broadcast.node;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.IndicesRequest;
@ -46,13 +47,13 @@ import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.NodeShouldNotConnectException;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
@ -363,7 +364,9 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
protected void onNodeFailure(DiscoveryNode node, int nodeIndex, Throwable t) {
String nodeId = node.getId();
if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) {
logger.debug("failed to execute [{}] on node [{}]", t, actionName, nodeId);
logger.debug(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage("failed to execute [{}] on node [{}]", actionName, nodeId), t);
}
// this is defensive to protect against the possibility of double invocation
@ -441,11 +444,23 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
shardResults[shardIndex] = failure;
if (TransportActions.isShardNotAvailableException(e)) {
if (logger.isTraceEnabled()) {
logger.trace("[{}] failed to execute operation for shard [{}]", e, actionName, shardRouting.shortSummary());
logger.trace(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage(
"[{}] failed to execute operation for shard [{}]",
actionName,
shardRouting.shortSummary()),
e);
}
} else {
if (logger.isDebugEnabled()) {
logger.debug("[{}] failed to execute operation for shard [{}]", e, actionName, shardRouting.shortSummary());
logger.debug(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage(
"[{}] failed to execute operation for shard [{}]",
actionName,
shardRouting.shortSummary()),
e);
}
}
}

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.support.master;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionListenerResponseHandler;
import org.elasticsearch.action.ActionResponse;
@ -155,7 +156,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
public void onFailure(Exception t) {
if (t instanceof Discovery.FailedToCommitClusterStateException
|| (t instanceof NotMasterException)) {
logger.debug("master could not publish cluster state or stepped down before publishing action [{}], scheduling a retry", t, actionName);
logger.debug((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("master could not publish cluster state or stepped down before publishing action [{}], scheduling a retry", actionName), t);
retry(t, MasterNodeChangePredicate.INSTANCE);
} else {
listener.onFailure(t);
@ -209,7 +210,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
@Override
public void onTimeout(TimeValue timeout) {
logger.debug("timed out while retrying [{}] after failure (timeout [{}])", failure, actionName, timeout);
logger.debug((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("timed out while retrying [{}] after failure (timeout [{}])", actionName, timeout), failure);
listener.onFailure(new MasterNotDiscoveredException(failure));
}
}, changePredicate

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.support.nodes;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.NoSuchNodeException;
@ -31,13 +32,13 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.NodeShouldNotConnectException;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;
import java.util.ArrayList;
@ -238,7 +239,9 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
private void onFailure(int idx, String nodeId, Throwable t) {
if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) {
logger.debug("failed to execute on node [{}]", t, nodeId);
logger.debug(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage("failed to execute on node [{}]", nodeId), t);
}
if (accumulateExceptions()) {
responses.set(idx, new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t));

View File

@ -18,6 +18,8 @@
*/
package org.elasticsearch.action.support.replication;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;
@ -31,7 +33,6 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.index.shard.ShardId;
@ -56,7 +57,7 @@ public class ReplicationOperation<
ReplicaRequest extends ReplicationRequest<ReplicaRequest>,
PrimaryResultT extends ReplicationOperation.PrimaryResult<ReplicaRequest>
> {
private final ESLogger logger;
private final Logger logger;
private final Request request;
private final Supplier<ClusterState> clusterStateSupplier;
private final String opType;
@ -86,7 +87,7 @@ public class ReplicationOperation<
public ReplicationOperation(Request request, Primary<Request, ReplicaRequest, PrimaryResultT> primary,
ActionListener<PrimaryResultT> listener,
boolean executeOnReplicas, Replicas<ReplicaRequest> replicas,
Supplier<ClusterState> clusterStateSupplier, ESLogger logger, String opType) {
Supplier<ClusterState> clusterStateSupplier, Logger logger, String opType) {
this.executeOnReplicas = executeOnReplicas;
this.replicasProxy = replicas;
this.primary = primary;
@ -189,8 +190,14 @@ public class ReplicationOperation<
@Override
public void onFailure(Exception replicaException) {
logger.trace("[{}] failure while performing [{}] on replica {}, request [{}]", replicaException, shard.shardId(), opType,
shard, replicaRequest);
logger.trace(
(org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
"[{}] failure while performing [{}] on replica {}, request [{}]",
shard.shardId(),
opType,
shard,
replicaRequest),
replicaException);
if (ignoreReplicaException(replicaException)) {
decPendingAndFinishIfNeeded();
} else {
@ -198,7 +205,9 @@ public class ReplicationOperation<
shardReplicaFailures.add(new ReplicationResponse.ShardInfo.Failure(
shard.shardId(), shard.currentNodeId(), replicaException, restStatus, false));
String message = String.format(Locale.ROOT, "failed to perform %s on replica %s", opType, shard);
logger.warn("[{}] {}", replicaException, shard.shardId(), message);
logger.warn(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage("[{}] {}", shard.shardId(), message), replicaException);
replicasProxy.failShard(shard, replicaRequest.primaryTerm(), message, replicaException,
ReplicationOperation.this::decPendingAndFinishIfNeeded,
ReplicationOperation.this::onPrimaryDemoted,

View File

@ -248,4 +248,12 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
public String getDescription() {
return toString();
}
/**
* This method is called before this replication request is retried
* the first time.
*/
public void onRetry() {
// nothing by default
}
}
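
The onRetry() hook added above is a no-op by default and, per its javadoc, is called before the request is retried the first time. A hedged sketch of a hypothetical subclass that tracks retries (constructors and stream serialization omitted for brevity; the counter is illustrative and not part of this commit):

    // Hypothetical request type; only the onRetry() override is the point here.
    public class CountingReplicationRequest extends ReplicationRequest<CountingReplicationRequest> {
        private int retries;

        @Override
        public void onRetry() {
            // called before this request is retried the first time (see javadoc above)
            retries++;
        }
    }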

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.support.replication;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionListenerResponseHandler;
@ -37,10 +38,12 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.AllocationId;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.settings.Settings;
@ -52,22 +55,26 @@ import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardNotFoundException;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.node.NodeClosedException;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportChannelResponseHandler;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportResponse.Empty;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;
import java.util.function.Supplier;
@ -114,9 +121,12 @@ public abstract class TransportReplicationAction<
this.transportPrimaryAction = actionName + "[p]";
this.transportReplicaAction = actionName + "[r]";
transportService.registerRequestHandler(actionName, request, ThreadPool.Names.SAME, new OperationTransportHandler());
transportService.registerRequestHandler(transportPrimaryAction, request, executor, new PrimaryOperationTransportHandler());
transportService.registerRequestHandler(transportPrimaryAction, () -> new ConcreteShardRequest<>(request), executor,
new PrimaryOperationTransportHandler());
// we must never reject on because of thread pool capacity on replicas
transportService.registerRequestHandler(transportReplicaAction, replicaRequest, executor, true, true,
transportService.registerRequestHandler(transportReplicaAction,
() -> new ConcreteShardRequest<>(replicaRequest),
executor, true, true,
new ReplicaOperationTransportHandler());
this.transportOptions = transportOptions();
@ -162,7 +172,7 @@ public abstract class TransportReplicationAction<
/**
* Synchronous replica operation on nodes with replica copies. This is done under the lock form
* {@link #acquireReplicaOperationLock(ShardId, long, ActionListener)}.
* {@link #acquireReplicaOperationLock(ShardId, long, String, ActionListener)}.
*/
protected abstract ReplicaResult shardOperationOnReplica(ReplicaRequest shardRequest);
@ -215,7 +225,9 @@ public abstract class TransportReplicationAction<
channel.sendResponse(e);
} catch (Exception inner) {
inner.addSuppressed(e);
logger.warn("Failed to send response for {}", inner, actionName);
logger.warn(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage("Failed to send response for {}", actionName), inner);
}
}
});
@ -227,33 +239,36 @@ public abstract class TransportReplicationAction<
}
}
class PrimaryOperationTransportHandler implements TransportRequestHandler<Request> {
class PrimaryOperationTransportHandler implements TransportRequestHandler<ConcreteShardRequest<Request>> {
@Override
public void messageReceived(final Request request, final TransportChannel channel) throws Exception {
public void messageReceived(final ConcreteShardRequest<Request> request, final TransportChannel channel) throws Exception {
throw new UnsupportedOperationException("the task parameter is required for this operation");
}
@Override
public void messageReceived(Request request, TransportChannel channel, Task task) {
new AsyncPrimaryAction(request, channel, (ReplicationTask) task).run();
public void messageReceived(ConcreteShardRequest<Request> request, TransportChannel channel, Task task) {
new AsyncPrimaryAction(request.request, request.targetAllocationID, channel, (ReplicationTask) task).run();
}
}
class AsyncPrimaryAction extends AbstractRunnable implements ActionListener<PrimaryShardReference> {
private final Request request;
/** targetAllocationID of the shard this request is meant for */
private final String targetAllocationID;
private final TransportChannel channel;
private final ReplicationTask replicationTask;
AsyncPrimaryAction(Request request, TransportChannel channel, ReplicationTask replicationTask) {
AsyncPrimaryAction(Request request, String targetAllocationID, TransportChannel channel, ReplicationTask replicationTask) {
this.request = request;
this.targetAllocationID = targetAllocationID;
this.channel = channel;
this.replicationTask = replicationTask;
}
@Override
protected void doRun() throws Exception {
acquirePrimaryShardReference(request.shardId(), this);
acquirePrimaryShardReference(request.shardId(), targetAllocationID, this);
}
@Override
@ -268,7 +283,9 @@ public abstract class TransportReplicationAction<
final ShardRouting primary = primaryShardReference.routingEntry();
assert primary.relocating() : "indexShard is marked as relocated but routing isn't" + primary;
DiscoveryNode relocatingNode = clusterService.state().nodes().get(primary.relocatingNodeId());
transportService.sendRequest(relocatingNode, transportPrimaryAction, request, transportOptions,
transportService.sendRequest(relocatingNode, transportPrimaryAction,
new ConcreteShardRequest<>(request, primary.allocationId().getRelocationId()),
transportOptions,
new TransportChannelResponseHandler<Response>(logger, channel, "rerouting indexing to target primary " + primary,
TransportReplicationAction.this::newResponseInstance) {
@ -388,15 +405,17 @@ public abstract class TransportReplicationAction<
}
}
class ReplicaOperationTransportHandler implements TransportRequestHandler<ReplicaRequest> {
class ReplicaOperationTransportHandler implements TransportRequestHandler<ConcreteShardRequest<ReplicaRequest>> {
@Override
public void messageReceived(final ReplicaRequest request, final TransportChannel channel) throws Exception {
public void messageReceived(final ConcreteShardRequest<ReplicaRequest> request, final TransportChannel channel)
throws Exception {
throw new UnsupportedOperationException("the task parameter is required for this operation");
}
@Override
public void messageReceived(ReplicaRequest request, TransportChannel channel, Task task) throws Exception {
new AsyncReplicaAction(request, channel, (ReplicationTask) task).run();
public void messageReceived(ConcreteShardRequest<ReplicaRequest> requestWithAID, TransportChannel channel, Task task)
throws Exception {
new AsyncReplicaAction(requestWithAID.request, requestWithAID.targetAllocationID, channel, (ReplicationTask) task).run();
}
}
@ -414,6 +433,8 @@ public abstract class TransportReplicationAction<
private final class AsyncReplicaAction extends AbstractRunnable implements ActionListener<Releasable> {
private final ReplicaRequest request;
// allocation id of the replica this request is meant for
private final String targetAllocationID;
private final TransportChannel channel;
/**
* The task on the node with the replica shard.
@ -423,10 +444,11 @@ public abstract class TransportReplicationAction<
// something we want to avoid at all costs
private final ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext());
AsyncReplicaAction(ReplicaRequest request, TransportChannel channel, ReplicationTask task) {
AsyncReplicaAction(ReplicaRequest request, String targetAllocationID, TransportChannel channel, ReplicationTask task) {
this.request = request;
this.channel = channel;
this.task = task;
this.targetAllocationID = targetAllocationID;
}
@Override
@ -444,7 +466,13 @@ public abstract class TransportReplicationAction<
@Override
public void onFailure(Exception e) {
if (e instanceof RetryOnReplicaException) {
logger.trace("Retrying operation on replica, action [{}], request [{}]", e, transportReplicaAction, request);
logger.trace(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage(
"Retrying operation on replica, action [{}], request [{}]",
transportReplicaAction,
request),
e);
final ThreadContext.StoredContext context = threadPool.getThreadContext().newStoredContext();
observer.waitForNextChange(new ClusterStateObserver.Listener() {
@Override
@ -455,7 +483,9 @@ public abstract class TransportReplicationAction<
String extraMessage = "action [" + transportReplicaAction + "], request[" + request + "]";
TransportChannelResponseHandler<TransportResponse.Empty> handler =
new TransportChannelResponseHandler<>(logger, channel, extraMessage, () -> TransportResponse.Empty.INSTANCE);
transportService.sendRequest(clusterService.localNode(), transportReplicaAction, request, handler);
transportService.sendRequest(clusterService.localNode(), transportReplicaAction,
new ConcreteShardRequest<>(request, targetAllocationID),
handler);
}
@Override
@ -479,7 +509,12 @@ public abstract class TransportReplicationAction<
channel.sendResponse(e);
} catch (IOException responseException) {
responseException.addSuppressed(e);
logger.warn("failed to send error message back to client for action [{}]", responseException, transportReplicaAction);
logger.warn(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage(
"failed to send error message back to client for action [{}]",
transportReplicaAction),
responseException);
}
}
@ -487,7 +522,7 @@ public abstract class TransportReplicationAction<
protected void doRun() throws Exception {
setPhase(task, "replica");
assert request.shardId() != null : "request shardId must be set";
acquireReplicaOperationLock(request.shardId(), request.primaryTerm(), this);
acquireReplicaOperationLock(request.shardId(), request.primaryTerm(), targetAllocationID, this);
}
/**
@ -584,7 +619,7 @@ public abstract class TransportReplicationAction<
logger.trace("send action [{}] on primary [{}] for request [{}] with cluster state version [{}] to [{}] ",
transportPrimaryAction, request.shardId(), request, state.version(), primary.currentNodeId());
}
performAction(node, transportPrimaryAction, true);
performAction(node, transportPrimaryAction, true, new ConcreteShardRequest<>(request, primary.allocationId().getId()));
}
private void performRemoteAction(ClusterState state, ShardRouting primary, DiscoveryNode node) {
@ -606,7 +641,7 @@ public abstract class TransportReplicationAction<
request.shardId(), request, state.version(), primary.currentNodeId());
}
setPhase(task, "rerouted");
performAction(node, actionName, false);
performAction(node, actionName, false, request);
}
private boolean retryIfUnavailable(ClusterState state, ShardRouting primary) {
@ -657,8 +692,9 @@ public abstract class TransportReplicationAction<
}
}
private void performAction(final DiscoveryNode node, final String action, final boolean isPrimaryAction) {
transportService.sendRequest(node, action, request, transportOptions, new TransportResponseHandler<Response>() {
private void performAction(final DiscoveryNode node, final String action, final boolean isPrimaryAction,
final TransportRequest requestToPerform) {
transportService.sendRequest(node, action, requestToPerform, transportOptions, new TransportResponseHandler<Response>() {
@Override
public Response newInstance() {
@ -682,8 +718,12 @@ public abstract class TransportReplicationAction<
final Throwable cause = exp.unwrapCause();
if (cause instanceof ConnectTransportException || cause instanceof NodeClosedException ||
(isPrimaryAction && retryPrimaryException(cause))) {
logger.trace("received an error from node [{}] for request [{}], scheduling a retry", exp, node.getId(),
request);
logger.trace(
(org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
"received an error from node [{}] for request [{}], scheduling a retry",
node.getId(),
requestToPerform),
exp);
retry(exp);
} else {
finishAsFailed(exp);
@ -704,6 +744,7 @@ public abstract class TransportReplicationAction<
return;
}
setPhase(task, "waiting_for_retry");
request.onRetry();
final ThreadContext.StoredContext context = threadPool.getThreadContext().newStoredContext();
observer.waitForNextChange(new ClusterStateObserver.Listener() {
@Override
@ -729,7 +770,9 @@ public abstract class TransportReplicationAction<
void finishAsFailed(Exception failure) {
if (finished.compareAndSet(false, true)) {
setPhase(task, "failed");
logger.trace("operation failed. action [{}], request [{}]", failure, actionName, request);
logger.trace(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage("operation failed. action [{}], request [{}]", actionName, request), failure);
listener.onFailure(failure);
} else {
assert false : "finishAsFailed called but operation is already finished";
@ -737,7 +780,13 @@ public abstract class TransportReplicationAction<
}
void finishWithUnexpectedFailure(Exception failure) {
logger.warn("unexpected error during the primary phase for action [{}], request [{}]", failure, actionName, request);
logger.warn(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage(
"unexpected error during the primary phase for action [{}], request [{}]",
actionName,
request),
failure);
if (finished.compareAndSet(false, true)) {
setPhase(task, "failed");
listener.onFailure(failure);
@ -767,7 +816,8 @@ public abstract class TransportReplicationAction<
* tries to acquire reference to {@link IndexShard} to perform a primary operation. Released after performing primary operation locally
* and replication of the operation to all replica shards is completed / failed (see {@link ReplicationOperation}).
*/
protected void acquirePrimaryShardReference(ShardId shardId, ActionListener<PrimaryShardReference> onReferenceAcquired) {
protected void acquirePrimaryShardReference(ShardId shardId, String allocationId,
ActionListener<PrimaryShardReference> onReferenceAcquired) {
IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
IndexShard indexShard = indexService.getShard(shardId.id());
// we may end up here if the cluster state used to route the primary is so stale that the underlying
@ -777,6 +827,10 @@ public abstract class TransportReplicationAction<
throw new ReplicationOperation.RetryOnPrimaryException(indexShard.shardId(),
"actual shard is not a primary " + indexShard.routingEntry());
}
final String actualAllocationId = indexShard.routingEntry().allocationId().getId();
if (actualAllocationId.equals(allocationId) == false) {
throw new ShardNotFoundException(shardId, "expected aID [{}] but found [{}]", allocationId, actualAllocationId);
}
ActionListener<Releasable> onAcquired = new ActionListener<Releasable>() {
@Override
@ -796,9 +850,14 @@ public abstract class TransportReplicationAction<
/**
* tries to acquire an operation on replicas. The lock is closed as soon as replication is completed on the node.
*/
protected void acquireReplicaOperationLock(ShardId shardId, long primaryTerm, ActionListener<Releasable> onLockAcquired) {
protected void acquireReplicaOperationLock(ShardId shardId, long primaryTerm, final String allocationId,
ActionListener<Releasable> onLockAcquired) {
IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
IndexShard indexShard = indexService.getShard(shardId.id());
final String actualAllocationId = indexShard.routingEntry().allocationId().getId();
if (actualAllocationId.equals(allocationId) == false) {
throw new ShardNotFoundException(shardId, "expected aID [{}] but found [{}]", allocationId, actualAllocationId);
}
indexShard.acquireReplicaOperationLock(primaryTerm, onLockAcquired, executor);
}
@ -861,7 +920,8 @@ public abstract class TransportReplicationAction<
listener.onFailure(new NoNodeAvailableException("unknown node [" + nodeId + "]"));
return;
}
transportService.sendRequest(node, transportReplicaAction, request, transportOptions,
transportService.sendRequest(node, transportReplicaAction,
new ConcreteShardRequest<>(request, replica.allocationId().getId()), transportOptions,
new ActionListenerResponseHandler<>(listener, () -> TransportResponse.Empty.INSTANCE));
}
@ -903,6 +963,72 @@ public abstract class TransportReplicationAction<
}
}
/** a wrapper class to encapsulate a request when being sent to a specific allocation id **/
public static final class ConcreteShardRequest<R extends TransportRequest> extends TransportRequest {
/** {@link AllocationId#getId()} of the shard this request is sent to **/
private String targetAllocationID;
private R request;
ConcreteShardRequest(Supplier<R> requestSupplier) {
request = requestSupplier.get();
// null now, but will be populated by reading from the streams
targetAllocationID = null;
}
ConcreteShardRequest(R request, String targetAllocationID) {
Objects.requireNonNull(request);
Objects.requireNonNull(targetAllocationID);
this.request = request;
this.targetAllocationID = targetAllocationID;
}
@Override
public void setParentTask(String parentTaskNode, long parentTaskId) {
request.setParentTask(parentTaskNode, parentTaskId);
}
@Override
public void setParentTask(TaskId taskId) {
request.setParentTask(taskId);
}
@Override
public TaskId getParentTask() {
return request.getParentTask();
}
@Override
public Task createTask(long id, String type, String action, TaskId parentTaskId) {
return request.createTask(id, type, action, parentTaskId);
}
@Override
public String getDescription() {
return "[" + request.getDescription() + "] for aID [" + targetAllocationID + "]";
}
@Override
public void readFrom(StreamInput in) throws IOException {
targetAllocationID = in.readString();
request.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(targetAllocationID);
request.writeTo(out);
}
public R getRequest() {
return request;
}
public String getTargetAllocationID() {
return targetAllocationID;
}
}
/**
* Sets the current phase on the task if it isn't null. Pulled into its own
* method because its more convenient that way.
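
Taken together, the changes to this file wrap every primary and replica message in a ConcreteShardRequest carrying the allocation ID of the shard copy the sender routed to, and the receiving node rejects the operation when its local copy no longer has that allocation ID. A simplified, self-contained sketch of that handshake (hypothetical stand-in types; the real code performs this check inside acquirePrimaryShardReference and acquireReplicaOperationLock):

    // Toy model of the allocation-ID check; not the actual transport plumbing.
    final class AllocationIdCheckSketch {
        static final class WrappedRequest {
            final Object request;
            final String targetAllocationId;
            WrappedRequest(Object request, String targetAllocationId) {
                this.request = request;
                this.targetAllocationId = targetAllocationId;
            }
        }

        static void handle(WrappedRequest wrapped, String localAllocationId) {
            // A stale cluster state can route to a node whose shard copy has since
            // been reassigned, so the allocation id must match before doing any work.
            if (localAllocationId.equals(wrapped.targetAllocationId) == false) {
                throw new IllegalStateException("expected aID [" + wrapped.targetAllocationId
                        + "] but found [" + localAllocationId + "]");
            }
            // ... perform the primary or replica operation with wrapped.request ...
        }
    }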

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.support.replication;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.WriteRequest;
@ -27,7 +28,6 @@ import org.elasticsearch.cluster.action.shard.ShardStateAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.shard.IndexShard;
@ -241,13 +241,13 @@ public abstract class TransportWriteAction<
private final RespondingWriteResult respond;
private final IndexShard indexShard;
private final WriteRequest<?> request;
private final ESLogger logger;
private final Logger logger;
AsyncAfterWriteAction(final IndexShard indexShard,
final WriteRequest<?> request,
@Nullable final Translog.Location location,
final RespondingWriteResult respond,
final ESLogger logger) {
final Logger logger) {
this.indexShard = indexShard;
this.request = request;
boolean waitUntilRefresh = false;

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.support.single.shard;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.NoShardAvailableActionException;
@ -39,10 +40,10 @@ import org.elasticsearch.common.logging.LoggerMessageFormat;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;
import java.util.function.Supplier;
@ -187,7 +188,9 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ
private void onFailure(ShardRouting shardRouting, Exception e) {
if (logger.isTraceEnabled() && e != null) {
logger.trace("{}: failed to execute [{}]", e, shardRouting, internalRequest.request());
logger.trace(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage("{}: failed to execute [{}]", shardRouting, internalRequest.request()), e);
}
perform(e);
}
@ -205,7 +208,9 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ
failure = new NoShardAvailableActionException(null, LoggerMessageFormat.format("No shard available for [{}]", internalRequest.request()), failure);
} else {
if (logger.isDebugEnabled()) {
logger.debug("{}: failed to execute [{}]", failure, null, internalRequest.request());
logger.debug(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage("{}: failed to execute [{}]", null, internalRequest.request()), failure);
}
}
listener.onFailure(failure);

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.support.tasks;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException;
@ -38,7 +39,6 @@ import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.NodeShouldNotConnectException;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;
@ -46,6 +46,7 @@ import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
@ -275,7 +276,9 @@ public abstract class TransportTasksAction<
private void onFailure(int idx, String nodeId, Throwable t) {
if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) {
logger.debug("failed to execute on node [{}]", t, nodeId);
logger.debug(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage("failed to execute on node [{}]", nodeId), t);
}
if (accumulateExceptions()) {
responses.set(idx, new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t));

View File

@ -19,6 +19,8 @@
package org.elasticsearch.action.termvectors;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.TransportActions;
@ -87,7 +89,7 @@ public class TransportShardMultiTermsVectorAction extends TransportSingleShardAc
if (TransportActions.isShardNotAvailableException(t)) {
throw (ElasticsearchException) t;
} else {
logger.debug("{} failed to execute multi term vectors for [{}]/[{}]", t, shardId, termVectorsRequest.type(), termVectorsRequest.id());
logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute multi term vectors for [{}]/[{}]", shardId, termVectorsRequest.type(), termVectorsRequest.id()), t);
response.add(request.locations.get(i),
new MultiTermVectorsResponse.Failure(request.index(), termVectorsRequest.type(), termVectorsRequest.id(), t));
}

View File

@ -19,6 +19,7 @@
package org.elasticsearch.bootstrap;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.Constants;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.StringHelper;
@ -28,7 +29,6 @@ import org.elasticsearch.cli.Terminal;
import org.elasticsearch.common.PidFile;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.inject.CreationException;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.LogConfigurator;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
@ -81,7 +81,7 @@ final class Bootstrap {
/** initialize native resources */
public static void initializeNatives(Path tmpFile, boolean mlockAll, boolean seccomp, boolean ctrlHandler) {
final ESLogger logger = Loggers.getLogger(Bootstrap.class);
final Logger logger = Loggers.getLogger(Bootstrap.class);
// check if the user is running as root, and bail
if (Natives.definitelyRunningAsRoot()) {
@ -227,7 +227,7 @@ final class Bootstrap {
INSTANCE = new Bootstrap();
Environment environment = initialEnvironment(foreground, pidFile, esSettings);
LogConfigurator.configure(environment.settings(), true);
LogConfigurator.configure(environment, true);
checkForCustomConfFile();
if (environment.pidFile() != null) {
@ -264,7 +264,7 @@ final class Bootstrap {
if (foreground) {
Loggers.disableConsoleLogging();
}
ESLogger logger = Loggers.getLogger(Bootstrap.class);
Logger logger = Loggers.getLogger(Bootstrap.class);
if (INSTANCE.node != null) {
logger = Loggers.getLogger(Bootstrap.class, Node.NODE_NAME_SETTING.get(INSTANCE.node.settings()));
}
@ -310,7 +310,7 @@ final class Bootstrap {
private static void checkUnsetAndMaybeExit(String confFileSetting, String settingName) {
if (confFileSetting != null && confFileSetting.isEmpty() == false) {
ESLogger logger = Loggers.getLogger(Bootstrap.class);
Logger logger = Loggers.getLogger(Bootstrap.class);
logger.info("{} is no longer supported. elasticsearch.yml must be placed in the config directory and cannot be renamed.", settingName);
exit(1);
}

View File

@ -19,15 +19,16 @@
package org.elasticsearch.bootstrap;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.Constants;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.monitor.process.ProcessProbe;
import org.elasticsearch.node.Node;
@ -100,7 +101,7 @@ final class BootstrapCheck {
final boolean enforceLimits,
final boolean ignoreSystemChecks,
final List<Check> checks,
final ESLogger logger) {
final Logger logger) {
final List<String> errors = new ArrayList<>();
final List<String> ignoredErrors = new ArrayList<>();
@ -136,7 +137,7 @@ final class BootstrapCheck {
}
static void log(final ESLogger logger, final String error) {
static void log(final Logger logger, final String error) {
logger.warn(error);
}
@ -417,7 +418,7 @@ final class BootstrapCheck {
}
// visible for testing
long getMaxMapCount(ESLogger logger) {
long getMaxMapCount(Logger logger) {
final Path path = getProcSysVmMaxMapCountPath();
try (final BufferedReader bufferedReader = getBufferedReader(path)) {
final String rawProcSysVmMaxMapCount = readProcSysVmMaxMapCount(bufferedReader);
@ -425,11 +426,15 @@ final class BootstrapCheck {
try {
return parseProcSysVmMaxMapCount(rawProcSysVmMaxMapCount);
} catch (final NumberFormatException e) {
logger.warn("unable to parse vm.max_map_count [{}]", e, rawProcSysVmMaxMapCount);
logger.warn(
(Supplier<?>) () -> new ParameterizedMessage(
"unable to parse vm.max_map_count [{}]",
rawProcSysVmMaxMapCount),
e);
}
}
} catch (final IOException e) {
logger.warn("I/O exception while trying to read [{}]", e, path);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("I/O exception while trying to read [{}]", path), e);
}
return -1;
}
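
The max-map-count check above reads a single numeric line from /proc/sys/vm/max_map_count and returns -1 when the file cannot be read or parsed, logging a warning instead of failing. A standalone sketch of that read-and-parse flow (path handling and logging simplified; not the production implementation):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.util.List;

    final class MaxMapCountSketch {
        // Returns the kernel's vm.max_map_count, or -1 if it cannot be determined.
        static long readMaxMapCount() {
            final Path path = Paths.get("/proc/sys/vm/max_map_count");
            try {
                final List<String> lines = Files.readAllLines(path);
                return Long.parseLong(lines.get(0).trim());
            } catch (IOException | NumberFormatException | IndexOutOfBoundsException e) {
                // the real bootstrap check warns here rather than aborting startup
                return -1;
            }
        }
    }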

View File

@ -116,4 +116,5 @@ class Elasticsearch extends SettingCommand {
static void close(String[] args) throws IOException {
Bootstrap.stop();
}
}

View File

@ -19,9 +19,10 @@
package org.elasticsearch.bootstrap;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.lucene.index.MergePolicy;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import java.io.IOError;
@ -76,14 +77,17 @@ class ElasticsearchUncaughtExceptionHandler implements Thread.UncaughtExceptionH
// visible for testing
void onFatalUncaught(final String threadName, final Throwable t) {
final ESLogger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get());
logger.error("fatal error in thread [{}], exiting", t, threadName);
final Logger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get());
logger.error(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage("fatal error in thread [{}], exiting", threadName), t);
}
// visible for testing
void onNonFatalUncaught(final String threadName, final Throwable t) {
final ESLogger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get());
logger.warn("uncaught exception in thread [{}]", t, threadName);
final Logger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get());
logger.warn((org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage("uncaught exception in thread [{}]", threadName), t);
}
// visible for testing

View File

@ -22,8 +22,8 @@ package org.elasticsearch.bootstrap;
import com.sun.jna.Native;
import com.sun.jna.NativeLong;
import com.sun.jna.Structure;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.Constants;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import java.util.Arrays;
@ -34,7 +34,7 @@ import java.util.List;
*/
final class JNACLibrary {
private static final ESLogger logger = Loggers.getLogger(JNACLibrary.class);
private static final Logger logger = Loggers.getLogger(JNACLibrary.class);
public static final int MCL_CURRENT = 1;
public static final int ENOMEM = 12;

View File

@ -25,8 +25,8 @@ import com.sun.jna.NativeLong;
import com.sun.jna.Pointer;
import com.sun.jna.Structure;
import com.sun.jna.win32.StdCallLibrary;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.Constants;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import java.util.ArrayList;
@ -40,7 +40,7 @@ import java.util.List;
*/
final class JNAKernel32Library {
private static final ESLogger logger = Loggers.getLogger(JNAKernel32Library.class);
private static final Logger logger = Loggers.getLogger(JNAKernel32Library.class);
// Callbacks must be kept around in order to be able to be called later,
// when the Windows ConsoleCtrlHandler sends an event.

View File

@ -21,8 +21,8 @@ package org.elasticsearch.bootstrap;
import com.sun.jna.Native;
import com.sun.jna.Pointer;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.Constants;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.monitor.jvm.JvmInfo;
@ -39,7 +39,7 @@ class JNANatives {
/** no instantiation */
private JNANatives() {}
private static final ESLogger logger = Loggers.getLogger(JNANatives.class);
private static final Logger logger = Loggers.getLogger(JNANatives.class);
// Set to true, in case native mlockall call was successful
static boolean LOCAL_MLOCKALL = false;

View File

@ -19,10 +19,10 @@
package org.elasticsearch.bootstrap;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import java.io.IOException;
@ -76,7 +76,7 @@ public class JarHell {
*/
public static void checkJarHell() throws Exception {
ClassLoader loader = JarHell.class.getClassLoader();
ESLogger logger = Loggers.getLogger(JarHell.class);
Logger logger = Loggers.getLogger(JarHell.class);
if (logger.isDebugEnabled()) {
logger.debug("java.class.path: {}", System.getProperty("java.class.path"));
logger.debug("sun.boot.class.path: {}", System.getProperty("sun.boot.class.path"));
@ -86,7 +86,7 @@ public class JarHell {
}
checkJarHell(parseClassPath());
}
/**
* Parses the classpath into an array of URLs
* @return array of URLs
@ -150,7 +150,7 @@ public class JarHell {
*/
@SuppressForbidden(reason = "needs JarFile for speed, just reading entries")
public static void checkJarHell(URL urls[]) throws Exception {
ESLogger logger = Loggers.getLogger(JarHell.class);
Logger logger = Loggers.getLogger(JarHell.class);
// we don't try to be sneaky and use deprecated/internal/not portable stuff
// like sun.boot.class.path, and with jigsaw we don't yet have a way to get
// a "list" at all. So just exclude any elements underneath the java home
@ -168,7 +168,7 @@ public class JarHell {
if (path.toString().endsWith(".jar")) {
if (!seenJars.add(path)) {
logger.debug("excluding duplicate classpath element: {}", path);
continue; // we can't fail because of sheistiness with joda-time
continue;
}
logger.debug("examining jar: {}", path);
try (JarFile file = new JarFile(path.toString())) {
@ -271,11 +271,13 @@ public class JarHell {
"class: " + clazz + System.lineSeparator() +
"exists multiple times in jar: " + jarpath + " !!!!!!!!!");
} else {
if (clazz.startsWith("org.apache.log4j")) {
return; // go figure, jar hell for what should be System.out.println...
}
if (clazz.equals("org.joda.time.base.BaseDateTime")) {
return; // apparently this is intentional... clean this up
if (clazz.startsWith("org.apache.logging.log4j.core.impl.ThrowableProxy")) {
/*
* deliberate to hack around a bug in Log4j
* cf. https://github.com/elastic/elasticsearch/issues/20304
* cf. https://issues.apache.org/jira/browse/LOG4J2-1560
*/
return;
}
throw new IllegalStateException("jar hell!" + System.lineSeparator() +
"class: " + clazz + System.lineSeparator() +

View File

@ -19,7 +19,7 @@
package org.elasticsearch.bootstrap;
import org.elasticsearch.common.logging.ESLogger;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.Loggers;
import java.nio.file.Path;
@ -32,7 +32,7 @@ final class Natives {
/** no instantiation */
private Natives() {}
private static final ESLogger logger = Loggers.getLogger(Natives.class);
private static final Logger logger = Loggers.getLogger(Natives.class);
// marker to determine if the JNA class files are available to the JVM
static final boolean JNA_AVAILABLE;

View File

@ -26,9 +26,9 @@ import com.sun.jna.NativeLong;
import com.sun.jna.Pointer;
import com.sun.jna.Structure;
import com.sun.jna.ptr.PointerByReference;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.Constants;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import java.io.IOException;
@ -92,7 +92,7 @@ import java.util.Map;
*/
// not an example of how to write code!!!
final class Seccomp {
private static final ESLogger logger = Loggers.getLogger(Seccomp.class);
private static final Logger logger = Loggers.getLogger(Seccomp.class);
// Linux implementation, based on seccomp(2) or prctl(2) with bpf filtering

View File

@ -20,7 +20,8 @@
package org.elasticsearch.client.transport;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
@ -32,9 +33,8 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.client.Requests;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
@ -43,11 +43,11 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.FutureTransportResponseHandler;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;
import java.io.Closeable;
@ -340,7 +340,7 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
transportService.connectToNode(node);
} catch (Exception e) {
it.remove();
logger.debug("failed to connect to discovered node [{}]", e, node);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to connect to discovered node [{}]", node), e);
}
}
}
@ -377,7 +377,9 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
logger.trace("connecting to listed node (light) [{}]", listedNode);
transportService.connectToNodeLight(listedNode);
} catch (Exception e) {
logger.debug("failed to connect to node [{}], removed from nodes list", e, listedNode);
logger.debug(
(Supplier<?>)
() -> new ParameterizedMessage("failed to connect to node [{}], removed from nodes list", listedNode), e);
newFilteredNodes.add(listedNode);
continue;
}
@ -409,7 +411,8 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
newNodes.add(listedNode);
}
} catch (Exception e) {
logger.info("failed to get node info for {}, disconnecting...", e, listedNode);
logger.info(
(Supplier<?>) () -> new ParameterizedMessage("failed to get node info for {}, disconnecting...", listedNode), e);
transportService.disconnectFromNode(listedNode);
}
}
@ -453,7 +456,9 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
transportService.connectToNodeLight(listedNode);
}
} catch (Exception e) {
logger.debug("failed to connect to node [{}], ignoring...", e, listedNode);
logger.debug(
(Supplier<?>)
() -> new ParameterizedMessage("failed to connect to node [{}], ignoring...", listedNode), e);
latch.countDown();
return;
}
@ -482,13 +487,17 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
@Override
public void handleException(TransportException e) {
logger.info("failed to get local cluster state for {}, disconnecting...", e, listedNode);
logger.info(
(Supplier<?>) () -> new ParameterizedMessage(
"failed to get local cluster state for {}, disconnecting...", listedNode), e);
transportService.disconnectFromNode(listedNode);
latch.countDown();
}
});
} catch (Exception e) {
logger.info("failed to get local cluster state info for {}, disconnecting...", e, listedNode);
logger.info(
(Supplier<?>)() -> new ParameterizedMessage(
"failed to get local cluster state info for {}, disconnecting...", listedNode), e);
transportService.disconnectFromNode(listedNode);
latch.countDown();
}

View File

@ -19,6 +19,7 @@
package org.elasticsearch.cluster;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
import org.elasticsearch.cluster.action.index.NodeMappingRefreshAction;
import org.elasticsearch.cluster.action.shard.ShardStateAction;
@ -44,17 +45,16 @@ import org.elasticsearch.cluster.routing.allocation.decider.ConcurrentRebalanceA
import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.NodeVersionAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.RebalanceOnlyWhenActiveAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;

View File

@ -19,10 +19,10 @@
package org.elasticsearch.cluster;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ThreadContext;
@ -35,7 +35,7 @@ import java.util.concurrent.atomic.AtomicReference;
*/
public class ClusterStateObserver {
protected final ESLogger logger;
protected final Logger logger;
public final ChangePredicate MATCH_ALL_CHANGES_PREDICATE = new EventPredicate() {
@ -58,7 +58,7 @@ public class ClusterStateObserver {
volatile boolean timedOut;
public ClusterStateObserver(ClusterService clusterService, ESLogger logger, ThreadContext contextHolder) {
public ClusterStateObserver(ClusterService clusterService, Logger logger, ThreadContext contextHolder) {
this(clusterService, new TimeValue(60000), logger, contextHolder);
}
@ -67,7 +67,7 @@ public class ClusterStateObserver {
* will fail any existing or new #waitForNextChange calls. Set to null
* to wait indefinitely
*/
public ClusterStateObserver(ClusterService clusterService, @Nullable TimeValue timeout, ESLogger logger, ThreadContext contextHolder) {
public ClusterStateObserver(ClusterService clusterService, @Nullable TimeValue timeout, Logger logger, ThreadContext contextHolder) {
this.clusterService = clusterService;
this.lastObservedState = new AtomicReference<>(new ObservedState(clusterService.state()));
this.timeOutValue = timeout;

View File

@ -19,6 +19,7 @@
package org.elasticsearch.cluster;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.LatchedActionListener;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
@ -39,7 +40,6 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
@ -379,7 +379,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
return clusterInfo;
}
static void buildShardLevelInfo(ESLogger logger, ShardStats[] stats, ImmutableOpenMap.Builder<String, Long> newShardSizes,
static void buildShardLevelInfo(Logger logger, ShardStats[] stats, ImmutableOpenMap.Builder<String, Long> newShardSizes,
ImmutableOpenMap.Builder<ShardRouting, String> newShardRoutingToDataPath, ClusterState state) {
MetaData meta = state.getMetaData();
for (ShardStats s : stats) {
@ -402,7 +402,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
}
}
static void fillDiskUsagePerNode(ESLogger logger, List<NodeStats> nodeStatsArray,
static void fillDiskUsagePerNode(Logger logger, List<NodeStats> nodeStatsArray,
ImmutableOpenMap.Builder<String, DiskUsage> newLeastAvaiableUsages,
ImmutableOpenMap.Builder<String, DiskUsage> newMostAvaiableUsages) {
for (NodeStats nodeStats : nodeStatsArray) {

View File

@ -18,6 +18,8 @@
*/
package org.elasticsearch.cluster;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
@ -91,7 +93,7 @@ public class NodeConnectionsService extends AbstractLifecycleComponent {
try {
transportService.disconnectFromNode(node);
} catch (Exception e) {
logger.warn("failed to disconnect to node [{}]", e, node);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to disconnect to node [{}]", node), e);
}
}
}
@ -113,7 +115,11 @@ public class NodeConnectionsService extends AbstractLifecycleComponent {
nodeFailureCount = nodeFailureCount + 1;
// log every 6th failure
if ((nodeFailureCount % 6) == 1) {
logger.warn("failed to connect to node {} (tried [{}] times)", e, node, nodeFailureCount);
final int finalNodeFailureCount = nodeFailureCount;
logger.warn(
(Supplier<?>)
() -> new ParameterizedMessage(
"failed to connect to node {} (tried [{}] times)", node, finalNodeFailureCount), e);
}
nodes.put(node, nodeFailureCount);
}
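
One detail worth noting in the hunk above: nodeFailureCount is reassigned, so it is not effectively final and cannot be captured by the message lambda directly; the value is first copied into finalNodeFailureCount. The same idiom in isolation (class and values are illustrative):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.message.ParameterizedMessage;
    import org.apache.logging.log4j.util.Supplier;

    final class EffectivelyFinalCaptureSketch {
        private static final Logger logger = LogManager.getLogger(EffectivelyFinalCaptureSketch.class);

        void onConnectFailure(String node, Exception e, int previousFailures) {
            int failureCount = previousFailures;
            failureCount = failureCount + 1;            // mutated, so not effectively final
            final int finalFailureCount = failureCount; // copy the lambda is allowed to capture
            logger.warn(
                (Supplier<?>) () -> new ParameterizedMessage(
                    "failed to connect to node {} (tried [{}] times)", node, finalFailureCount),
                e);
        }
    }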

View File

@ -19,6 +19,9 @@
package org.elasticsearch.cluster.action.shard;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.cluster.ClusterChangedEvent;
@ -43,7 +46,6 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.Discovery;
@ -108,7 +110,7 @@ public class ShardStateAction extends AbstractComponent {
if (isMasterChannelException(exp)) {
waitForNewMasterAndRetry(actionName, observer, shardEntry, listener);
} else {
logger.warn("{} unexpected failure while sending request [{}] to [{}] for shard entry [{}]", exp, shardEntry.shardId, actionName, masterNode, shardEntry);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} unexpected failure while sending request [{}] to [{}] for shard entry [{}]", shardEntry.shardId, actionName, masterNode, shardEntry), exp);
listener.onFailure(exp instanceof RemoteTransportException ? (Exception) (exp.getCause() instanceof Exception ? exp.getCause() : new ElasticsearchException(exp.getCause())) : exp);
}
}
@ -169,7 +171,7 @@ public class ShardStateAction extends AbstractComponent {
@Override
public void onClusterServiceClose() {
logger.warn("{} node closed while execution action [{}] for shard entry [{}]", shardEntry.failure, shardEntry.shardId, actionName, shardEntry);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} node closed while execution action [{}] for shard entry [{}]", shardEntry.shardId, actionName, shardEntry), shardEntry.failure);
listener.onFailure(new NodeClosedException(clusterService.localNode()));
}
@ -184,9 +186,9 @@ public class ShardStateAction extends AbstractComponent {
private static class ShardFailedTransportHandler implements TransportRequestHandler<ShardEntry> {
private final ClusterService clusterService;
private final ShardFailedClusterStateTaskExecutor shardFailedClusterStateTaskExecutor;
private final ESLogger logger;
private final Logger logger;
public ShardFailedTransportHandler(ClusterService clusterService, ShardFailedClusterStateTaskExecutor shardFailedClusterStateTaskExecutor, ESLogger logger) {
public ShardFailedTransportHandler(ClusterService clusterService, ShardFailedClusterStateTaskExecutor shardFailedClusterStateTaskExecutor, Logger logger) {
this.clusterService = clusterService;
this.shardFailedClusterStateTaskExecutor = shardFailedClusterStateTaskExecutor;
this.logger = logger;
@ -194,7 +196,7 @@ public class ShardStateAction extends AbstractComponent {
@Override
public void messageReceived(ShardEntry request, TransportChannel channel) throws Exception {
logger.warn("{} received shard failed for {}", request.failure, request.shardId, request);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} received shard failed for {}", request.shardId, request), request.failure);
clusterService.submitStateUpdateTask(
"shard-failed",
request,
@ -203,12 +205,12 @@ public class ShardStateAction extends AbstractComponent {
new ClusterStateTaskListener() {
@Override
public void onFailure(String source, Exception e) {
logger.error("{} unexpected failure while failing shard [{}]", e, request.shardId, request);
logger.error((Supplier<?>) () -> new ParameterizedMessage("{} unexpected failure while failing shard [{}]", request.shardId, request), e);
try {
channel.sendResponse(e);
} catch (Exception channelException) {
channelException.addSuppressed(e);
logger.warn("{} failed to send failure [{}] while failing shard [{}]", channelException, request.shardId, e, request);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} failed to send failure [{}] while failing shard [{}]", request.shardId, e, request), channelException);
}
}
@ -218,7 +220,7 @@ public class ShardStateAction extends AbstractComponent {
try {
channel.sendResponse(new NotMasterException(source));
} catch (Exception channelException) {
logger.warn("{} failed to send no longer master while failing shard [{}]", channelException, request.shardId, request);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} failed to send no longer master while failing shard [{}]", request.shardId, request), channelException);
}
}
@ -227,7 +229,7 @@ public class ShardStateAction extends AbstractComponent {
try {
channel.sendResponse(TransportResponse.Empty.INSTANCE);
} catch (Exception channelException) {
logger.warn("{} failed to send response while failing shard [{}]", channelException, request.shardId, request);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} failed to send response while failing shard [{}]", request.shardId, request), channelException);
}
}
}
@ -238,9 +240,9 @@ public class ShardStateAction extends AbstractComponent {
public static class ShardFailedClusterStateTaskExecutor implements ClusterStateTaskExecutor<ShardEntry> {
private final AllocationService allocationService;
private final RoutingService routingService;
private final ESLogger logger;
private final Logger logger;
public ShardFailedClusterStateTaskExecutor(AllocationService allocationService, RoutingService routingService, ESLogger logger) {
public ShardFailedClusterStateTaskExecutor(AllocationService allocationService, RoutingService routingService, Logger logger) {
this.allocationService = allocationService;
this.routingService = routingService;
this.logger = logger;
@ -315,7 +317,7 @@ public class ShardStateAction extends AbstractComponent {
}
batchResultBuilder.successes(tasksToBeApplied);
} catch (Exception e) {
logger.warn("failed to apply failed shards {}", e, shardRoutingsToBeApplied);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to apply failed shards {}", shardRoutingsToBeApplied), e);
// failures are communicated back to the requester
// cluster state will not be updated in this case
batchResultBuilder.failures(tasksToBeApplied, e);
@ -352,9 +354,9 @@ public class ShardStateAction extends AbstractComponent {
private static class ShardStartedTransportHandler implements TransportRequestHandler<ShardEntry> {
private final ClusterService clusterService;
private final ShardStartedClusterStateTaskExecutor shardStartedClusterStateTaskExecutor;
private final ESLogger logger;
private final Logger logger;
public ShardStartedTransportHandler(ClusterService clusterService, ShardStartedClusterStateTaskExecutor shardStartedClusterStateTaskExecutor, ESLogger logger) {
public ShardStartedTransportHandler(ClusterService clusterService, ShardStartedClusterStateTaskExecutor shardStartedClusterStateTaskExecutor, Logger logger) {
this.clusterService = clusterService;
this.shardStartedClusterStateTaskExecutor = shardStartedClusterStateTaskExecutor;
this.logger = logger;
@ -375,9 +377,9 @@ public class ShardStateAction extends AbstractComponent {
public static class ShardStartedClusterStateTaskExecutor implements ClusterStateTaskExecutor<ShardEntry>, ClusterStateTaskListener {
private final AllocationService allocationService;
private final ESLogger logger;
private final Logger logger;
public ShardStartedClusterStateTaskExecutor(AllocationService allocationService, ESLogger logger) {
public ShardStartedClusterStateTaskExecutor(AllocationService allocationService, Logger logger) {
this.allocationService = allocationService;
this.logger = logger;
}
@ -431,7 +433,7 @@ public class ShardStateAction extends AbstractComponent {
}
builder.successes(tasksToBeApplied);
} catch (Exception e) {
logger.warn("failed to apply started shards {}", e, shardRoutingsToBeApplied);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to apply started shards {}", shardRoutingsToBeApplied), e);
builder.failures(tasksToBeApplied, e);
}
@ -440,7 +442,7 @@ public class ShardStateAction extends AbstractComponent {
@Override
public void onFailure(String source, Exception e) {
logger.error("unexpected failure during [{}]", e, source);
logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
}
}
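
The hunks above all make the same mechanical change: the old ESLogger calls passed the Throwable right after the format string, while the Log4j 2 API used here defers message construction behind a Supplier and takes the Throwable as the last argument, so the ParameterizedMessage is only built when the level is actually enabled. A minimal, self-contained sketch of the before/after shape (the class and method names are illustrative, not from this diff):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

class LazyLoggingSketch {
    private static final Logger logger = LogManager.getLogger(LazyLoggingSketch.class);

    void onConnectFailure(String node, int attempts, Exception e) {
        // Old ESLogger style: exception immediately after the format string.
        // logger.warn("failed to connect to node {} (tried [{}] times)", e, node, attempts);

        // Log4j 2 style: lazy message, Throwable as the last argument.
        logger.warn(
            (Supplier<?>) () -> new ParameterizedMessage(
                "failed to connect to node {} (tried [{}] times)", node, attempts),
            e);
    }
}

The explicit (Supplier<?>) cast pins the lambda to the Supplier overload rather than the similar MessageSupplier one.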

View File

@ -22,6 +22,7 @@ package org.elasticsearch.cluster.metadata;
import com.carrotsearch.hppc.ObjectHashSet;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.cluster.Diff;
import org.elasticsearch.cluster.Diffable;
@ -38,7 +39,6 @@ import org.elasticsearch.common.collect.HppcMaps;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
@ -758,7 +758,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
/** As of 2.0 we require units for time and byte-sized settings. This method adds default units to any cluster settings that don't
* specify a unit. */
public static MetaData addDefaultUnitsIfNeeded(ESLogger logger, MetaData metaData) {
public static MetaData addDefaultUnitsIfNeeded(Logger logger, MetaData metaData) {
Settings.Builder newPersistentSettings = null;
for(Map.Entry<String,String> ent : metaData.persistentSettings().getAsMap().entrySet()) {
String settingName = ent.getKey();
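
The javadoc above describes a back-compat pass: pre-2.0 clusters could persist time and byte-sized settings as bare numbers, and this method rewrites such values to carry an explicit unit so stricter parsing accepts them. Purely as an illustration of the idea, with a hypothetical setting name and an assumed default unit (neither is taken from the method body):

// Hypothetical time-valued setting persisted by a 1.x cluster without a unit.
String key = "some.time.setting";   // placeholder name, not a real setting
String value = "30";                // bare number, no unit
if (value.matches("[-+]?[0-9]+")) {
    value = value + "ms";           // append the setting's default unit (assumed to be ms here)
}
// "30" becomes "30ms", so later unit-aware parsing of the setting succeeds.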

View File

@ -21,6 +21,8 @@ package org.elasticsearch.cluster.metadata;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
@ -446,9 +448,9 @@ public class MetaDataCreateIndexService extends AbstractComponent {
@Override
public void onFailure(String source, Exception e) {
if (e instanceof IndexAlreadyExistsException) {
logger.trace("[{}] failed to create", e, request.index());
logger.trace((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to create", request.index()), e);
} else {
logger.debug("[{}] failed to create", e, request.index());
logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to create", request.index()), e);
}
super.onFailure(source, e);
}

View File

@ -20,7 +20,6 @@
package org.elasticsearch.cluster.metadata;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesClusterStateUpdateRequest;
@ -64,7 +63,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
private final AliasValidator aliasValidator;
private final NodeServicesProvider nodeServicesProvider;
private final MetaDataDeleteIndexService deleteIndexService;
@Inject

View File

@ -20,6 +20,8 @@
package org.elasticsearch.cluster.metadata;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest;
import org.elasticsearch.cluster.AckedClusterStateTaskListener;
@ -193,7 +195,7 @@ public class MetaDataMappingService extends AbstractComponent {
}
}
} catch (Exception e) {
logger.warn("[{}] failed to refresh-mapping in cluster state", e, index);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to refresh-mapping in cluster state", index), e);
}
return dirty;
}
@ -207,7 +209,7 @@ public class MetaDataMappingService extends AbstractComponent {
refreshTask,
ClusterStateTaskConfig.build(Priority.HIGH),
refreshExecutor,
(source, e) -> logger.warn("failure during [{}]", e, source)
(source, e) -> logger.warn((Supplier<?>) () -> new ParameterizedMessage("failure during [{}]", source), e)
);
}

View File

@ -21,6 +21,7 @@ package org.elasticsearch.cluster.routing;
import com.carrotsearch.hppc.ObjectIntHashMap;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
@ -30,7 +31,6 @@ import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
@ -452,7 +452,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
*
* @return the started shard
*/
public ShardRouting startShard(ESLogger logger, ShardRouting initializingShard, RoutingChangesObserver routingChangesObserver) {
public ShardRouting startShard(Logger logger, ShardRouting initializingShard, RoutingChangesObserver routingChangesObserver) {
ensureMutable();
ShardRouting startedShard = started(initializingShard);
logger.trace("{} marked shard as started (routing: {})", initializingShard.shardId(), initializingShard);
@ -484,7 +484,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
* - If shard is a (primary or replica) relocation target, this also clears the relocation information on the source shard.
*
*/
public void failShard(ESLogger logger, ShardRouting failedShard, UnassignedInfo unassignedInfo, IndexMetaData indexMetaData,
public void failShard(Logger logger, ShardRouting failedShard, UnassignedInfo unassignedInfo, IndexMetaData indexMetaData,
RoutingChangesObserver routingChangesObserver) {
ensureMutable();
assert failedShard.assignedToNode() : "only assigned shards can be failed";

View File

@ -19,6 +19,8 @@
package org.elasticsearch.cluster.routing;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
@ -113,16 +115,16 @@ public class RoutingService extends AbstractLifecycleComponent {
rerouting.set(false);
ClusterState state = clusterService.state();
if (logger.isTraceEnabled()) {
logger.error("unexpected failure during [{}], current state:\n{}", e, source, state.prettyPrint());
logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}], current state:\n{}", source, state.prettyPrint()), e);
} else {
logger.error("unexpected failure during [{}], current state version [{}]", e, source, state.version());
logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}], current state version [{}]", source, state.version()), e);
}
}
});
} catch (Exception e) {
rerouting.set(false);
ClusterState state = clusterService.state();
logger.warn("failed to reroute routing table, current state:\n{}", e, state.prettyPrint());
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to reroute routing table, current state:\n{}", state.prettyPrint()), e);
}
}
}

View File

@ -27,8 +27,8 @@ import org.elasticsearch.cluster.Diffable;
import org.elasticsearch.cluster.DiffableUtils;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@ -88,6 +88,11 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
return indicesRouting.containsKey(index);
}
public boolean hasIndex(Index index) {
IndexRoutingTable indexRouting = index(index.getName());
return indexRouting != null && indexRouting.getIndex().equals(index);
}
public IndexRoutingTable index(String index) {
return indicesRouting.get(index);
}
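
The new hasIndex(Index) overload added above is stricter than the existing hasIndex(String): it resolves the routing entry by name and then requires Index equality, so, assuming Index equality covers the index UUID, a routing table for an index that was deleted and recreated under the same name does not count as a match. A hedged usage sketch (state and index are assumed to be an up-to-date ClusterState and a fully resolved Index):

RoutingTable routingTable = state.routingTable();
if (routingTable.hasIndex(index)) {
    // Matches by name and by Index equality, not just by name.
    IndexRoutingTable indexRouting = routingTable.index(index.getName());
    // ... safe to use indexRouting for this concrete index ...
}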

View File

@ -19,6 +19,7 @@
package org.elasticsearch.cluster.routing.allocation.allocator;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.IntroSorter;
import org.elasticsearch.cluster.metadata.IndexMetaData;
@ -37,7 +38,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
@ -209,7 +209,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
* A {@link Balancer}
*/
public static class Balancer {
private final ESLogger logger;
private final Logger logger;
private final Map<String, ModelNode> nodes = new HashMap<>();
private final RoutingAllocation allocation;
private final RoutingNodes routingNodes;
@ -219,7 +219,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
private final MetaData metaData;
private final float avgShardsPerNode;
public Balancer(ESLogger logger, RoutingAllocation allocation, WeightFunction weight, float threshold) {
public Balancer(Logger logger, RoutingAllocation allocation, WeightFunction weight, float threshold) {
this.logger = logger;
this.allocation = allocation;
this.weight = weight;

View File

@ -78,11 +78,10 @@ public class DiskThresholdDecider extends AllocationDecider {
* Returns the size of all shards that are currently being relocated to
* the node, but may not be finished transferring yet.
*
* If subtractShardsMovingAway is set then the size of shards moving away is subtracted from the total size
* of all shards
* If subtractShardsMovingAway is true then the size of shards moving away is subtracted from the total size of all shards
*/
static long sizeOfRelocatingShards(RoutingNode node, RoutingAllocation allocation,
boolean subtractShardsMovingAway, String dataPath) {
boolean subtractShardsMovingAway, String dataPath) {
ClusterInfo clusterInfo = allocation.clusterInfo();
long totalSize = 0;
for (ShardRouting routing : node.shardsWithState(ShardRoutingState.RELOCATING, ShardRoutingState.INITIALIZING)) {
@ -111,7 +110,9 @@ public class DiskThresholdDecider extends AllocationDecider {
final double usedDiskThresholdLow = 100.0 - diskThresholdSettings.getFreeDiskThresholdLow();
final double usedDiskThresholdHigh = 100.0 - diskThresholdSettings.getFreeDiskThresholdHigh();
DiskUsage usage = getDiskUsage(node, allocation, usages);
// subtractLeavingShards is passed as false here, because they still use disk space, and therefore we should be extra careful
// and take the size into account
DiskUsage usage = getDiskUsage(node, allocation, usages, false);
// First, check that the node is currently over the low watermark
double freeDiskPercentage = usage.getFreeDiskAsPercentage();
// Cache the used disk percentage for displaying disk percentages consistent with documentation
@ -243,7 +244,9 @@ public class DiskThresholdDecider extends AllocationDecider {
return decision;
}
final DiskUsage usage = getDiskUsage(node, allocation, usages);
// subtractLeavingShards is passed as true here, since this is only for shards remaining, we will *eventually* have enough disk
// since shards are moving away. No new shards will be incoming since in canAllocate we pass false for this check.
final DiskUsage usage = getDiskUsage(node, allocation, usages, true);
final String dataPath = clusterInfo.getDataPath(shardRouting);
// If this node is already above the high threshold, the shard cannot remain (get it off!)
final double freeDiskPercentage = usage.getFreeDiskAsPercentage();
@ -280,7 +283,8 @@ public class DiskThresholdDecider extends AllocationDecider {
"there is enough disk on this node for the shard to remain, free: [%s]", new ByteSizeValue(freeBytes));
}
private DiskUsage getDiskUsage(RoutingNode node, RoutingAllocation allocation, ImmutableOpenMap<String, DiskUsage> usages) {
private DiskUsage getDiskUsage(RoutingNode node, RoutingAllocation allocation,
ImmutableOpenMap<String, DiskUsage> usages, boolean subtractLeavingShards) {
DiskUsage usage = usages.get(node.nodeId());
if (usage == null) {
// If there is no usage, and we have other nodes in the cluster,
@ -293,7 +297,7 @@ public class DiskThresholdDecider extends AllocationDecider {
}
if (diskThresholdSettings.includeRelocations()) {
long relocatingShardsSize = sizeOfRelocatingShards(node, allocation, true, usage.getPath());
long relocatingShardsSize = sizeOfRelocatingShards(node, allocation, subtractLeavingShards, usage.getPath());
DiskUsage usageIncludingRelocations = new DiskUsage(node.nodeId(), node.node().getName(), usage.getPath(),
usage.getTotalBytes(), usage.getFreeBytes() - relocatingShardsSize);
if (logger.isTraceEnabled()) {
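
The comments added in this file carry the key reasoning: when deciding canAllocate, shards relocating away are not subtracted (subtractLeavingShards = false) because they still occupy disk right now, while for canRemain they are subtracted (true) because that space will eventually be freed and canAllocate already blocks new incoming shards. A simplified, illustrative sketch of the accounting done by sizeOfRelocatingShards and getDiskUsage (all numbers are made up):

long freeBytes = 40L << 30;   // reported free space on the node, e.g. 40 GB
long incoming  = 10L << 30;   // shards INITIALIZING onto the node (relocation targets)
long outgoing  =  5L << 30;   // shards RELOCATING away from the node

boolean subtractLeavingShards = false;   // canAllocate: leaving shards still use disk, stay conservative
long relocatingShardsSize = incoming - (subtractLeavingShards ? outgoing : 0);
long effectiveFree = freeBytes - relocatingShardsSize;   // 30 GB here; 35 GB when canRemain passes true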

View File

@ -19,6 +19,9 @@
package org.elasticsearch.cluster.service;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.cluster.AckedClusterStateTaskListener;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName;
@ -43,7 +46,6 @@ import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
@ -554,9 +556,16 @@ public class ClusterService extends AbstractLifecycleComponent {
} catch (Exception e) {
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
if (logger.isTraceEnabled()) {
logger.trace("failed to execute cluster state update in [{}], state:\nversion [{}], source [{}]\n{}{}{}", e, executionTime,
previousClusterState.version(), tasksSummary, previousClusterState.nodes().prettyPrint(),
previousClusterState.routingTable().prettyPrint(), previousClusterState.getRoutingNodes().prettyPrint());
logger.trace(
(Supplier<?>) () -> new ParameterizedMessage(
"failed to execute cluster state update in [{}], state:\nversion [{}], source [{}]\n{}{}{}",
executionTime,
previousClusterState.version(),
tasksSummary,
previousClusterState.nodes().prettyPrint(),
previousClusterState.routingTable().prettyPrint(),
previousClusterState.getRoutingNodes().prettyPrint()),
e);
}
warnAboutSlowTaskIfNeeded(executionTime, tasksSummary);
batchResult = ClusterStateTaskExecutor.BatchResult.<T>builder()
@ -587,7 +596,9 @@ public class ClusterService extends AbstractLifecycleComponent {
executionResult.handle(
() -> proccessedListeners.add(updateTask),
ex -> {
logger.debug("cluster state update task {} failed", ex, updateTask.toString(executor));
logger.debug(
(Supplier<?>)
() -> new ParameterizedMessage("cluster state update task {} failed", updateTask.toString(executor)), ex);
updateTask.listener.onFailure(updateTask.source, ex);
}
);
@ -670,7 +681,11 @@ public class ClusterService extends AbstractLifecycleComponent {
try {
clusterStatePublisher.accept(clusterChangedEvent, ackListener);
} catch (Discovery.FailedToCommitClusterStateException t) {
logger.warn("failing [{}]: failed to commit cluster state version [{}]", t, tasksSummary, newClusterState.version());
final long version = newClusterState.version();
logger.warn(
(Supplier<?>) () -> new ParameterizedMessage(
"failing [{}]: failed to commit cluster state version [{}]", tasksSummary, version),
t);
proccessedListeners.forEach(task -> task.listener.onFailure(task.source, t));
return;
}
@ -713,7 +728,10 @@ public class ClusterService extends AbstractLifecycleComponent {
try {
ackListener.onNodeAck(newClusterState.nodes().getLocalNode(), null);
} catch (Exception e) {
logger.debug("error while processing ack for master node [{}]", e, newClusterState.nodes().getLocalNode());
final DiscoveryNode localNode = newClusterState.nodes().getLocalNode();
logger.debug(
(Supplier<?>) () -> new ParameterizedMessage("error while processing ack for master node [{}]", localNode),
e);
}
}
@ -724,7 +742,11 @@ public class ClusterService extends AbstractLifecycleComponent {
try {
executor.clusterStatePublished(clusterChangedEvent);
} catch (Exception e) {
logger.error("exception thrown while notifying executor of new cluster state publication [{}]", e, tasksSummary);
logger.error(
(Supplier<?>) () -> new ParameterizedMessage(
"exception thrown while notifying executor of new cluster state publication [{}]",
tasksSummary),
e);
}
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
@ -733,8 +755,18 @@ public class ClusterService extends AbstractLifecycleComponent {
warnAboutSlowTaskIfNeeded(executionTime, tasksSummary);
} catch (Exception e) {
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
logger.warn("failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}", e, executionTime,
newClusterState.version(), newClusterState.stateUUID(), tasksSummary, newClusterState.prettyPrint());
final long version = newClusterState.version();
final String stateUUID = newClusterState.stateUUID();
final String prettyPrint = newClusterState.prettyPrint();
logger.warn(
(Supplier<?>) () -> new ParameterizedMessage(
"failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}",
executionTime,
version,
stateUUID,
tasksSummary,
prettyPrint),
e);
// TODO: do we want to call updateTask.onFailure here?
}
@ -743,7 +775,7 @@ public class ClusterService extends AbstractLifecycleComponent {
// this one is overridden in tests so we can control time
protected long currentTimeInNanos() {return System.nanoTime();}
private static SafeClusterStateTaskListener safe(ClusterStateTaskListener listener, ESLogger logger) {
private static SafeClusterStateTaskListener safe(ClusterStateTaskListener listener, Logger logger) {
if (listener instanceof AckedClusterStateTaskListener) {
return new SafeAckedClusterStateTaskListener((AckedClusterStateTaskListener) listener, logger);
} else {
@ -753,9 +785,9 @@ public class ClusterService extends AbstractLifecycleComponent {
private static class SafeClusterStateTaskListener implements ClusterStateTaskListener {
private final ClusterStateTaskListener listener;
private final ESLogger logger;
private final Logger logger;
public SafeClusterStateTaskListener(ClusterStateTaskListener listener, ESLogger logger) {
public SafeClusterStateTaskListener(ClusterStateTaskListener listener, Logger logger) {
this.listener = listener;
this.logger = logger;
}
@ -766,7 +798,9 @@ public class ClusterService extends AbstractLifecycleComponent {
listener.onFailure(source, e);
} catch (Exception inner) {
inner.addSuppressed(e);
logger.error("exception thrown by listener notifying of failure from [{}]", inner, source);
logger.error(
(Supplier<?>) () -> new ParameterizedMessage(
"exception thrown by listener notifying of failure from [{}]", source), inner);
}
}
@ -775,7 +809,9 @@ public class ClusterService extends AbstractLifecycleComponent {
try {
listener.onNoLongerMaster(source);
} catch (Exception e) {
logger.error("exception thrown by listener while notifying no longer master from [{}]", e, source);
logger.error(
(Supplier<?>) () -> new ParameterizedMessage(
"exception thrown by listener while notifying no longer master from [{}]", source), e);
}
}
@ -785,21 +821,22 @@ public class ClusterService extends AbstractLifecycleComponent {
listener.clusterStateProcessed(source, oldState, newState);
} catch (Exception e) {
logger.error(
(Supplier<?>) () -> new ParameterizedMessage(
"exception thrown by listener while notifying of cluster state processed from [{}], old cluster state:\n" +
"{}\nnew cluster state:\n{}",
e,
"{}\nnew cluster state:\n{}",
source,
oldState.prettyPrint(),
newState.prettyPrint());
newState.prettyPrint()),
e);
}
}
}
private static class SafeAckedClusterStateTaskListener extends SafeClusterStateTaskListener implements AckedClusterStateTaskListener {
private final AckedClusterStateTaskListener listener;
private final ESLogger logger;
private final Logger logger;
public SafeAckedClusterStateTaskListener(AckedClusterStateTaskListener listener, ESLogger logger) {
public SafeAckedClusterStateTaskListener(AckedClusterStateTaskListener listener, Logger logger) {
super(listener, logger);
this.listener = listener;
this.logger = logger;
@ -996,7 +1033,7 @@ public class ClusterService extends AbstractLifecycleComponent {
private static class AckCountDownListener implements Discovery.AckListener {
private static final ESLogger logger = Loggers.getLogger(AckCountDownListener.class);
private static final Logger logger = Loggers.getLogger(AckCountDownListener.class);
private final AckedClusterStateTaskListener ackedTaskListener;
private final CountDown countDown;
@ -1040,7 +1077,10 @@ public class ClusterService extends AbstractLifecycleComponent {
logger.trace("ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion);
} else {
this.lastFailure = e;
logger.debug("ack received from node [{}], cluster_state update (version: {})", e, node, clusterStateVersion);
logger.debug(
(Supplier<?>) () -> new ParameterizedMessage(
"ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion),
e);
}
if (countDown.countDown()) {
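
A recurring shape in the ClusterService hunks is hoisting values (the state version, the stateUUID, the local node, the pretty-printed state) into final locals before the lazy logging call, since a lambda may only refer to enclosing locals that are effectively final, and hoisting fixes the captured values at the moment the failure is handled. A tiny sketch, reusing the Log4j 2 imports and logger from the earlier sketch (the values are stand-ins, not from this diff):

final long version = 42L;                                           // stand-in for newClusterState.version()
final Exception failure = new RuntimeException("publish failed");   // stand-in for the caught exception
logger.warn(
    (Supplier<?>) () -> new ParameterizedMessage(
        "failed to commit cluster state version [{}]", version),
    failure);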

View File

@ -16,13 +16,14 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common;
package org.elasticsearch.common;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* Annotation to suppress logging usage check errors inside a whole class or a method.
*/

View File

@ -19,7 +19,7 @@
package org.elasticsearch.common.breaker;
import org.elasticsearch.common.logging.ESLogger;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.indices.breaker.BreakerSettings;
import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
@ -36,7 +36,7 @@ public class ChildMemoryCircuitBreaker implements CircuitBreaker {
private final double overheadConstant;
private final AtomicLong used;
private final AtomicLong trippedCount;
private final ESLogger logger;
private final Logger logger;
private final HierarchyCircuitBreakerService parent;
private final String name;
@ -48,7 +48,7 @@ public class ChildMemoryCircuitBreaker implements CircuitBreaker {
* @param parent parent circuit breaker service to delegate tripped breakers to
* @param name the name of the breaker
*/
public ChildMemoryCircuitBreaker(BreakerSettings settings, ESLogger logger,
public ChildMemoryCircuitBreaker(BreakerSettings settings, Logger logger,
HierarchyCircuitBreakerService parent, String name) {
this(settings, null, logger, parent, name);
}
@ -64,7 +64,7 @@ public class ChildMemoryCircuitBreaker implements CircuitBreaker {
* @param oldBreaker the previous circuit breaker to inherit the used value from (starting offset)
*/
public ChildMemoryCircuitBreaker(BreakerSettings settings, ChildMemoryCircuitBreaker oldBreaker,
ESLogger logger, HierarchyCircuitBreakerService parent, String name) {
Logger logger, HierarchyCircuitBreakerService parent, String name) {
this.name = name;
this.settings = settings;
this.memoryBytesLimit = settings.getLimit();

View File

@ -18,7 +18,7 @@
*/
package org.elasticsearch.common.breaker;
import org.elasticsearch.common.logging.ESLogger;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.unit.ByteSizeValue;
import java.util.concurrent.atomic.AtomicLong;
@ -33,7 +33,7 @@ public class MemoryCircuitBreaker implements CircuitBreaker {
private final double overheadConstant;
private final AtomicLong used;
private final AtomicLong trippedCount;
private final ESLogger logger;
private final Logger logger;
/**
@ -43,7 +43,7 @@ public class MemoryCircuitBreaker implements CircuitBreaker {
* @param limit circuit breaker limit
* @param overheadConstant constant multiplier for byte estimations
*/
public MemoryCircuitBreaker(ByteSizeValue limit, double overheadConstant, ESLogger logger) {
public MemoryCircuitBreaker(ByteSizeValue limit, double overheadConstant, Logger logger) {
this(limit, overheadConstant, null, logger);
}
@ -56,7 +56,7 @@ public class MemoryCircuitBreaker implements CircuitBreaker {
* @param overheadConstant constant multiplier for byte estimations
* @param oldBreaker the previous circuit breaker to inherit the used value from (starting offset)
*/
public MemoryCircuitBreaker(ByteSizeValue limit, double overheadConstant, MemoryCircuitBreaker oldBreaker, ESLogger logger) {
public MemoryCircuitBreaker(ByteSizeValue limit, double overheadConstant, MemoryCircuitBreaker oldBreaker, Logger logger) {
this.memoryBytesLimit = limit.bytes();
this.overheadConstant = overheadConstant;
if (oldBreaker == null) {

View File

@ -19,19 +19,17 @@
package org.elasticsearch.common.component;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.Node;
/**
*
*/
public abstract class AbstractComponent {
protected final ESLogger logger;
protected final Logger logger;
protected final DeprecationLogger deprecationLogger;
protected final Settings settings;
@ -42,7 +40,7 @@ public abstract class AbstractComponent {
}
public AbstractComponent(Settings settings, Class customClass) {
this.logger = Loggers.getLogger(customClass, settings);
this.logger = LogManager.getLogger(customClass);
this.deprecationLogger = new DeprecationLogger(logger);
this.settings = settings;
}
@ -71,4 +69,5 @@ public abstract class AbstractComponent {
deprecationLogger.deprecated("Setting [{}] has been removed, use [{}] instead", settingName, alternativeName);
}
}
}
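
The AbstractComponent change above is representative of the whole migration: the protected ESLogger field becomes a plain org.apache.logging.log4j.Logger obtained from LogManager, and the DeprecationLogger is then built on top of that Logger. A minimal sketch of a component after the change (the class name is illustrative and Settings handling is omitted):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

abstract class ComponentSketch {
    protected final Logger logger;

    protected ComponentSketch() {
        // Per-class Log4j 2 logger, as in the new AbstractComponent constructor.
        this.logger = LogManager.getLogger(getClass());
    }
}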

View File

@ -19,20 +19,15 @@
package org.elasticsearch.common.geo.builders;
import org.locationtech.spatial4j.context.jts.JtsSpatialContext;
import org.locationtech.spatial4j.exception.InvalidShapeException;
import org.locationtech.spatial4j.shape.Shape;
import org.locationtech.spatial4j.shape.jts.JtsGeometry;
import com.vividsolutions.jts.geom.Coordinate;
import com.vividsolutions.jts.geom.Geometry;
import com.vividsolutions.jts.geom.GeometryFactory;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.support.ToXContentToBytes;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.unit.DistanceUnit.Distance;
import org.elasticsearch.common.xcontent.ToXContent;
@ -40,6 +35,10 @@ import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.mapper.GeoShapeFieldMapper;
import org.locationtech.spatial4j.context.jts.JtsSpatialContext;
import org.locationtech.spatial4j.exception.InvalidShapeException;
import org.locationtech.spatial4j.shape.Shape;
import org.locationtech.spatial4j.shape.jts.JtsGeometry;
import java.io.IOException;
import java.util.ArrayList;
@ -53,7 +52,7 @@ import java.util.Locale;
*/
public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWriteable {
protected static final ESLogger LOGGER = ESLoggerFactory.getLogger(ShapeBuilder.class.getName());
protected static final Logger LOGGER = ESLoggerFactory.getLogger(ShapeBuilder.class.getName());
private static final boolean DEBUG;
static {

View File

@ -16,6 +16,7 @@
package org.elasticsearch.common.inject.spi;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.Binder;
import org.elasticsearch.common.inject.Binding;
@ -40,7 +41,6 @@ import org.elasticsearch.common.inject.internal.PrivateElementsImpl;
import org.elasticsearch.common.inject.internal.ProviderMethodsModule;
import org.elasticsearch.common.inject.internal.SourceProvider;
import org.elasticsearch.common.inject.matcher.Matcher;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import java.lang.annotation.Annotation;
@ -351,7 +351,7 @@ public final class Elements {
return builder;
}
private static ESLogger logger = Loggers.getLogger(Elements.class);
private static Logger logger = Loggers.getLogger(Elements.class);
protected Object getSource() {
Object ret;

Some files were not shown because too many files have changed in this diff