diff --git a/.gitignore b/.gitignore index af7e64fc895..d1810a5a83f 100644 --- a/.gitignore +++ b/.gitignore @@ -20,6 +20,11 @@ nbactions.xml .gradle/ build/ +# gradle wrapper +/gradle/ +gradlew +gradlew.bat + # maven stuff (to be removed when trunk becomes 4.x) *-execution-hints.log target/ diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy index 0c2e37ab821..0fefecc1446 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy @@ -18,6 +18,7 @@ */ package org.elasticsearch.gradle.doc +import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.test.RestTestPlugin import org.gradle.api.Project import org.gradle.api.Task @@ -30,9 +31,19 @@ public class DocsTestPlugin extends RestTestPlugin { @Override public void apply(Project project) { super.apply(project) + Map defaultSubstitutions = [ + /* These match up with the asciidoc syntax for substitutions but + * the values may differ. In particular {version} needs to resolve + * to the version being built for testing but needs to resolve to + * the last released version for docs. */ + '\\{version\\}': + VersionProperties.elasticsearch.replace('-SNAPSHOT', ''), + '\\{lucene_version\\}' : VersionProperties.lucene, + ] Task listSnippets = project.tasks.create('listSnippets', SnippetsTask) listSnippets.group 'Docs' listSnippets.description 'List each snippet' + listSnippets.defaultSubstitutions = defaultSubstitutions listSnippets.perSnippet { println(it.toString()) } Task listConsoleCandidates = project.tasks.create( @@ -40,6 +51,7 @@ public class DocsTestPlugin extends RestTestPlugin { listConsoleCandidates.group 'Docs' listConsoleCandidates.description 'List snippets that probably should be marked // CONSOLE' + listConsoleCandidates.defaultSubstitutions = defaultSubstitutions listConsoleCandidates.perSnippet { if ( it.console != null // Already marked, nothing to do @@ -47,19 +59,17 @@ public class DocsTestPlugin extends RestTestPlugin { ) { return } - List languages = [ - // This language should almost always be marked console - 'js', - // These are often curl commands that should be converted but - // are probably false positives - 'sh', 'shell', - ] - if (false == languages.contains(it.language)) { - return + if ( // js almost always should be `// CONSOLE` + it.language == 'js' || + // snippets containing `curl` *probably* should + // be `// CONSOLE` + it.curl) { + println(it.toString()) } - println(it.toString()) } - project.tasks.create('buildRestTests', RestTestsFromSnippetsTask) + Task buildRestTests = project.tasks.create( + 'buildRestTests', RestTestsFromSnippetsTask) + buildRestTests.defaultSubstitutions = defaultSubstitutions } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy index fc7604ad1fd..dc4e6f5f70a 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy @@ -146,6 +146,9 @@ public class RestTestsFromSnippetsTask extends SnippetsTask { void emitDo(String method, String pathAndQuery, String body, String catchPart, List warnings, boolean inSetup) { def (String path, String query) = pathAndQuery.tokenize('?') + 
if (path == null) { + path = '' // Catch requests to the root... + } current.println(" - do:") if (catchPart != null) { current.println(" catch: $catchPart") diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy index 8c3524a9b9f..41f74b45be1 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy @@ -22,6 +22,7 @@ package org.elasticsearch.gradle.doc import org.gradle.api.DefaultTask import org.gradle.api.InvalidUserDataException import org.gradle.api.file.ConfigurableFileTree +import org.gradle.api.tasks.Input import org.gradle.api.tasks.InputFiles import org.gradle.api.tasks.TaskAction @@ -60,6 +61,12 @@ public class SnippetsTask extends DefaultTask { exclude 'build' } + /** + * Substitutions done on every snippet's contents. + */ + @Input + Map defaultSubstitutions = [:] + @TaskAction public void executeTask() { /* @@ -75,21 +82,39 @@ public class SnippetsTask extends DefaultTask { Closure emit = { snippet.contents = contents.toString() contents = null + Closure doSubstitution = { String pattern, String subst -> + /* + * $body is really common but it looks like a + * backreference so we just escape it here to make the + * tests cleaner. + */ + subst = subst.replace('$body', '\\$body') + // \n is a new line.... + subst = subst.replace('\\n', '\n') + snippet.contents = snippet.contents.replaceAll( + pattern, subst) + } + defaultSubstitutions.each doSubstitution if (substitutions != null) { - substitutions.each { String pattern, String subst -> - /* - * $body is really common but it looks like a - * backreference so we just escape it here to make the - * tests cleaner. - */ - subst = subst.replace('$body', '\\$body') - // \n is a new line.... - subst = subst.replace('\\n', '\n') - snippet.contents = snippet.contents.replaceAll( - pattern, subst) - } + substitutions.each doSubstitution substitutions = null } + if (snippet.language == null) { + throw new InvalidUserDataException("$snippet: " + + "Snippet missing a language. 
This is required by " + + "Elasticsearch's doc testing infrastructure so we " + + "be sure we don't accidentally forget to test a " + + "snippet.") + } + // Try to detect snippets that contain `curl` + if (snippet.language == 'sh' || snippet.language == 'shell') { + snippet.curl = snippet.contents.contains('curl') + if (snippet.console == false && snippet.curl == false) { + throw new InvalidUserDataException("$snippet: " + + "No need for NOTCONSOLE if snippet doesn't " + + "contain `curl`.") + } + } perSnippet(snippet) snippet = null } @@ -107,7 +132,7 @@ public class SnippetsTask extends DefaultTask { } return } - matcher = line =~ /\[source,(\w+)]\s*/ + matcher = line =~ /\["?source"?,\s*"?(\w+)"?(,.*)?].*/ if (matcher.matches()) { lastLanguage = matcher.group(1) lastLanguageLine = lineNumber @@ -250,6 +275,7 @@ public class SnippetsTask extends DefaultTask { String language = null String catchPart = null String setup = null + boolean curl List warnings = new ArrayList() @Override @@ -285,6 +311,9 @@ public class SnippetsTask extends DefaultTask { if (testSetup) { result += '// TESTSETUP' } + if (curl) { + result += '(curl)' + } return result } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy index b7b563bf158..0a454ee1006 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy @@ -97,8 +97,8 @@ public class PluginBuildPlugin extends BuildPlugin { // with a full elasticsearch server that includes optional deps provided "org.locationtech.spatial4j:spatial4j:${project.versions.spatial4j}" provided "com.vividsolutions:jts:${project.versions.jts}" - provided "log4j:log4j:${project.versions.log4j}" - provided "log4j:apache-log4j-extras:${project.versions.log4j}" + provided "org.apache.logging.log4j:log4j-api:${project.versions.log4j}" + provided "org.apache.logging.log4j:log4j-core:${project.versions.log4j}" provided "net.java.dev.jna:jna:${project.versions.jna}" } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy index a5e1e4c8932..44eb050dfb5 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy @@ -59,7 +59,8 @@ class PrecommitTasks { * use the NamingConventionsCheck we break the circular dependency * here. 
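
Stepping back to the SnippetsTask hunk above: the snippet-header matcher is widened from /\[source,(\w+)]\s*/ so that it also accepts the quoted AsciiDoc form and trailing attributes. A minimal sketch of the difference in plain Java regex terms (the quoted header string is a made-up example, not taken from the docs):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class SourceHeaderRegexDemo {
    public static void main(String[] args) {
        // the pattern being removed and the pattern replacing it, verbatim from the hunk
        Pattern before = Pattern.compile("\\[source,(\\w+)]\\s*");
        Pattern after = Pattern.compile("\\[\"?source\"?,\\s*\"?(\\w+)\"?(,.*)?].*");
        String plain = "[source,js]";
        String quoted = "[\"source\",\"js\",subs=\"attributes\"]"; // hypothetical header
        System.out.println(before.matcher(plain).matches());  // true
        System.out.println(before.matcher(quoted).matches()); // false: the old pattern misses it
        Matcher m = after.matcher(quoted);
        System.out.println(m.matches() + " language=" + m.group(1)); // true language=js
    }
}
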
*/ - precommitTasks.add(configureLoggerUsage(project)) + // https://github.com/elastic/elasticsearch/issues/20243 + // precommitTasks.add(configureLoggerUsage(project)) } diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index d9b92a9768f..14c2bc8ca5a 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -10,6 +10,9 @@ + + + @@ -21,7 +24,6 @@ - @@ -301,7 +303,6 @@ - @@ -386,7 +387,6 @@ - @@ -469,9 +469,7 @@ - - @@ -627,7 +625,7 @@ - + @@ -865,7 +863,6 @@ - @@ -905,8 +902,6 @@ - - @@ -994,7 +989,6 @@ - diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 7ac4d1f7ed1..e96f9824595 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -6,7 +6,7 @@ spatial4j = 0.6 jts = 1.13 jackson = 2.8.1 snakeyaml = 1.15 -log4j = 1.2.17 +log4j = 2.6.2 slf4j = 1.6.2 jna = 4.2.2 diff --git a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/ops/bulk/BulkBenchmarkTask.java b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/ops/bulk/BulkBenchmarkTask.java index daf7213ed51..214a75d12cc 100644 --- a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/ops/bulk/BulkBenchmarkTask.java +++ b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/ops/bulk/BulkBenchmarkTask.java @@ -18,13 +18,13 @@ */ package org.elasticsearch.client.benchmark.ops.bulk; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.client.benchmark.BenchmarkTask; import org.elasticsearch.client.benchmark.metrics.Sample; import org.elasticsearch.client.benchmark.metrics.SampleRecorder; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.PathUtils; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.ESLoggerFactory; import java.io.BufferedReader; @@ -135,7 +135,7 @@ public class BulkBenchmarkTask implements BenchmarkTask { private static final class BulkIndexer implements Runnable { - private static final ESLogger logger = ESLoggerFactory.getLogger(BulkIndexer.class.getName()); + private static final Logger logger = ESLoggerFactory.getLogger(BulkIndexer.class.getName()); private final BlockingQueue> bulkData; private final int warmupIterations; diff --git a/core/build.gradle b/core/build.gradle index 0e87c21757b..4eab7ed5d58 100644 --- a/core/build.gradle +++ b/core/build.gradle @@ -85,8 +85,10 @@ dependencies { compile "com.vividsolutions:jts:${versions.jts}", optional // logging - compile "log4j:log4j:${versions.log4j}", optional - compile "log4j:apache-log4j-extras:${versions.log4j}", optional + compile "org.apache.logging.log4j:log4j-api:${versions.log4j}", optional + compile "org.apache.logging.log4j:log4j-core:${versions.log4j}", optional + // to bridge dependencies that are still on Log4j 1 to Log4j 2 + compile "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}", optional compile "net.java.dev.jna:jna:${versions.jna}" @@ -154,32 +156,94 @@ thirdPartyAudit.excludes = [ // classes are missing! 
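
The build changes above swap Log4j 1.2.17 for Log4j 2.6.2: log4j-api and log4j-core replace log4j and apache-log4j-extras, and log4j-1.2-api keeps third-party code that still calls the Log4j 1 API working by routing it onto Log4j 2. A minimal sketch of the Log4j 2 surface involved; Elasticsearch's own code resolves loggers through ESLoggerFactory and Loggers as shown in the hunks, while LogManager is the stock Log4j 2 entry point and appears here only for illustration:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

class LoggingSketch {
    // replaces the Log4j 1 style: private static final ESLogger logger = ...
    private static final Logger logger = LogManager.getLogger(LoggingSketch.class);

    void run() {
        // {} placeholders are formatted lazily, only if INFO is enabled
        logger.info("indexed [{}] documents", 42);
    }
}
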
// from com.fasterxml.jackson.dataformat.yaml.YAMLMapper (jackson-dataformat-yaml) - 'com.fasterxml.jackson.databind.ObjectMapper', + 'com.fasterxml.jackson.databind.ObjectMapper', - // from org.apache.log4j.receivers.net.JMSReceiver (log4j-extras) - 'javax.jms.Message', - 'javax.jms.MessageListener', - 'javax.jms.ObjectMessage', - 'javax.jms.TopicConnection', - 'javax.jms.TopicConnectionFactory', - 'javax.jms.TopicPublisher', - 'javax.jms.TopicSession', - 'javax.jms.TopicSubscriber', + // from log4j + 'com.fasterxml.jackson.annotation.JsonInclude$Include', + 'com.fasterxml.jackson.databind.DeserializationContext', + 'com.fasterxml.jackson.databind.JsonMappingException', + 'com.fasterxml.jackson.databind.JsonNode', + 'com.fasterxml.jackson.databind.Module$SetupContext', + 'com.fasterxml.jackson.databind.ObjectReader', + 'com.fasterxml.jackson.databind.ObjectWriter', + 'com.fasterxml.jackson.databind.SerializerProvider', + 'com.fasterxml.jackson.databind.deser.std.StdDeserializer', + 'com.fasterxml.jackson.databind.deser.std.StdScalarDeserializer', + 'com.fasterxml.jackson.databind.module.SimpleModule', + 'com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter', + 'com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider', + 'com.fasterxml.jackson.databind.ser.std.StdScalarSerializer', + 'com.fasterxml.jackson.databind.ser.std.StdSerializer', + 'com.fasterxml.jackson.dataformat.xml.JacksonXmlModule', + 'com.fasterxml.jackson.dataformat.xml.XmlMapper', + 'com.fasterxml.jackson.dataformat.xml.util.DefaultXmlPrettyPrinter', + 'com.lmax.disruptor.BlockingWaitStrategy', + 'com.lmax.disruptor.BusySpinWaitStrategy', + 'com.lmax.disruptor.EventFactory', + 'com.lmax.disruptor.EventTranslator', + 'com.lmax.disruptor.EventTranslatorTwoArg', + 'com.lmax.disruptor.EventTranslatorVararg', + 'com.lmax.disruptor.ExceptionHandler', + 'com.lmax.disruptor.LifecycleAware', + 'com.lmax.disruptor.RingBuffer', + 'com.lmax.disruptor.Sequence', + 'com.lmax.disruptor.SequenceReportingEventHandler', + 'com.lmax.disruptor.SleepingWaitStrategy', + 'com.lmax.disruptor.TimeoutBlockingWaitStrategy', + 'com.lmax.disruptor.WaitStrategy', + 'com.lmax.disruptor.YieldingWaitStrategy', + 'com.lmax.disruptor.dsl.Disruptor', + 'com.lmax.disruptor.dsl.ProducerType', + 'javax.jms.Connection', + 'javax.jms.ConnectionFactory', + 'javax.jms.Destination', + 'javax.jms.Message', + 'javax.jms.MessageConsumer', + 'javax.jms.MessageListener', + 'javax.jms.MessageProducer', + 'javax.jms.ObjectMessage', + 'javax.jms.Session', + 'javax.mail.Authenticator', + 'javax.mail.Message$RecipientType', + 'javax.mail.PasswordAuthentication', + 'javax.mail.Session', + 'javax.mail.Transport', + 'javax.mail.internet.InternetAddress', + 'javax.mail.internet.InternetHeaders', + 'javax.mail.internet.MimeBodyPart', + 'javax.mail.internet.MimeMessage', + 'javax.mail.internet.MimeMultipart', + 'javax.mail.internet.MimeUtility', + 'javax.mail.util.ByteArrayDataSource', + 'javax.persistence.AttributeConverter', + 'javax.persistence.EntityManager', + 'javax.persistence.EntityManagerFactory', + 'javax.persistence.EntityTransaction', + 'javax.persistence.Persistence', + 'javax.persistence.PersistenceException', + 'org.apache.commons.compress.compressors.CompressorStreamFactory', + 'org.apache.commons.compress.utils.IOUtils', + 'org.apache.commons.csv.CSVFormat', + 'org.apache.commons.csv.QuoteMode', + 'org.apache.kafka.clients.producer.KafkaProducer', + 'org.apache.kafka.clients.producer.Producer', + 'org.apache.kafka.clients.producer.ProducerRecord', 
+ 'org.codehaus.stax2.XMLStreamWriter2', + 'org.osgi.framework.AdaptPermission', + 'org.osgi.framework.AdminPermission', + 'org.osgi.framework.Bundle', + 'org.osgi.framework.BundleActivator', + 'org.osgi.framework.BundleContext', + 'org.osgi.framework.BundleEvent', + 'org.osgi.framework.BundleReference', + 'org.osgi.framework.FrameworkUtil', + 'org.osgi.framework.SynchronousBundleListener', + 'org.osgi.framework.wiring.BundleWire', + 'org.osgi.framework.wiring.BundleWiring', + 'org.zeromq.ZMQ$Context', + 'org.zeromq.ZMQ$Socket', + 'org.zeromq.ZMQ', - // from org.apache.log4j.net.SMTPAppender (log4j) - 'javax.mail.Authenticator', - 'javax.mail.Message$RecipientType', - 'javax.mail.Message', - 'javax.mail.Multipart', - 'javax.mail.PasswordAuthentication', - 'javax.mail.Session', - 'javax.mail.Transport', - 'javax.mail.internet.InternetAddress', - 'javax.mail.internet.InternetHeaders', - 'javax.mail.internet.MimeBodyPart', - 'javax.mail.internet.MimeMessage', - 'javax.mail.internet.MimeMultipart', - 'javax.mail.internet.MimeUtility', // from org.locationtech.spatial4j.io.GeoJSONReader (spatial4j) 'org.noggit.JSONParser', ] diff --git a/core/src/main/java/org/apache/log4j/Java9Hack.java b/core/src/main/java/org/apache/log4j/Java9Hack.java deleted file mode 100644 index 831cf5b35ae..00000000000 --- a/core/src/main/java/org/apache/log4j/Java9Hack.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.log4j; - -import org.apache.log4j.helpers.ThreadLocalMap; - -/** - * Log4j 1.2 MDC breaks because it parses java.version incorrectly (does not handle new java9 versioning). - * - * This hack fixes up the pkg private members as if it had detected the java version correctly. - */ -public class Java9Hack { - - public static void fixLog4j() { - if (MDC.mdc.tlm == null) { - MDC.mdc.java1 = false; - MDC.mdc.tlm = new ThreadLocalMap(); - } - } -} diff --git a/core/src/main/java/org/apache/log4j/package-info.java b/core/src/main/java/org/apache/log4j/package-info.java deleted file mode 100644 index f628016aa6d..00000000000 --- a/core/src/main/java/org/apache/log4j/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/** - * Hack to fix Log4j 1.2 in Java 9. - */ -package org.apache.log4j; diff --git a/core/src/main/java/org/apache/logging/log4j/core/impl/ThrowableProxy.java b/core/src/main/java/org/apache/logging/log4j/core/impl/ThrowableProxy.java new file mode 100644 index 00000000000..37ab0a15391 --- /dev/null +++ b/core/src/main/java/org/apache/logging/log4j/core/impl/ThrowableProxy.java @@ -0,0 +1,665 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache license, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the license for the specific language governing permissions and + * limitations under the license. + */ + +package org.apache.logging.log4j.core.impl; + +import java.io.Serializable; +import java.net.URL; +import java.security.CodeSource; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.Stack; + +import org.apache.logging.log4j.core.util.Loader; +import org.apache.logging.log4j.status.StatusLogger; +import org.apache.logging.log4j.util.ReflectionUtil; +import org.apache.logging.log4j.util.Strings; + +/** + * Wraps a Throwable to add packaging information about each stack trace element. + * + *
<p>
+ * A proxy is used to represent a throwable that may not exist in a different class loader or JVM. When an application + * deserializes a ThrowableProxy, the throwable may not be set, but the throwable's information is preserved in other + * fields of the proxy like the message and stack trace. + *
</p>
+ * + *
<p>
+ * TODO: Move this class to org.apache.logging.log4j.core because it is used from LogEvent. + *
</p>
+ *
<p>
+ * TODO: Deserialize: Try to rebuild Throwable if the target exception is in this class loader? + *
</p>
+ */ +public class ThrowableProxy implements Serializable { + + private static final String CAUSED_BY_LABEL = "Caused by: "; + private static final String SUPPRESSED_LABEL = "Suppressed: "; + private static final String WRAPPED_BY_LABEL = "Wrapped by: "; + + /** + * Cached StackTracePackageElement and ClassLoader. + *
<p>
+ * Consider this class private. + *
</p>
+ */ + static class CacheEntry { + private final ExtendedClassInfo element; + private final ClassLoader loader; + + public CacheEntry(final ExtendedClassInfo element, final ClassLoader loader) { + this.element = element; + this.loader = loader; + } + } + + private static final ThrowableProxy[] EMPTY_THROWABLE_PROXY_ARRAY = new ThrowableProxy[0]; + + private static final char EOL = '\n'; + + private static final long serialVersionUID = -2752771578252251910L; + + private final ThrowableProxy causeProxy; + + private int commonElementCount; + + private final ExtendedStackTraceElement[] extendedStackTrace; + + private final String localizedMessage; + + private final String message; + + private final String name; + + private final ThrowableProxy[] suppressedProxies; + + private final transient Throwable throwable; + + /** + * For JSON and XML IO via Jackson. + */ + @SuppressWarnings("unused") + private ThrowableProxy() { + this.throwable = null; + this.name = null; + this.extendedStackTrace = null; + this.causeProxy = null; + this.message = null; + this.localizedMessage = null; + this.suppressedProxies = EMPTY_THROWABLE_PROXY_ARRAY; + } + + /** + * Constructs the wrapper for the Throwable that includes packaging data. + * + * @param throwable + * The Throwable to wrap, must not be null. + */ + public ThrowableProxy(final Throwable throwable) { + this(throwable, null); + } + + /** + * Constructs the wrapper for the Throwable that includes packaging data. + * + * @param throwable + * The Throwable to wrap, must not be null. + * @param visited + * The set of visited suppressed exceptions. + */ + private ThrowableProxy(final Throwable throwable, final Set visited) { + this.throwable = throwable; + this.name = throwable.getClass().getName(); + this.message = throwable.getMessage(); + this.localizedMessage = throwable.getLocalizedMessage(); + final Map map = new HashMap<>(); + final Stack> stack = ReflectionUtil.getCurrentStackTrace(); + this.extendedStackTrace = this.toExtendedStackTrace(stack, map, null, throwable.getStackTrace()); + final Throwable throwableCause = throwable.getCause(); + final Set causeVisited = new HashSet<>(1); + this.causeProxy = throwableCause == null ? null : new ThrowableProxy(throwable, stack, map, throwableCause, visited, causeVisited); + this.suppressedProxies = this.toSuppressedProxies(throwable, visited); + } + + /** + * Constructs the wrapper for a Throwable that is referenced as the cause by another Throwable. + * + * @param parent + * The Throwable referencing this Throwable. + * @param stack + * The Class stack. + * @param map + * The cache containing the packaging data. + * @param cause + * The Throwable to wrap. + * @param suppressedVisited TODO + * @param causeVisited TODO + */ + private ThrowableProxy(final Throwable parent, final Stack> stack, final Map map, + final Throwable cause, final Set suppressedVisited, final Set causeVisited) { + causeVisited.add(cause); + this.throwable = cause; + this.name = cause.getClass().getName(); + this.message = this.throwable.getMessage(); + this.localizedMessage = this.throwable.getLocalizedMessage(); + this.extendedStackTrace = this.toExtendedStackTrace(stack, map, parent.getStackTrace(), cause.getStackTrace()); + final Throwable causeCause = cause.getCause(); + this.causeProxy = causeCause == null || causeVisited.contains(causeCause) ? 
null : new ThrowableProxy(parent, + stack, map, causeCause, suppressedVisited, causeVisited); + this.suppressedProxies = this.toSuppressedProxies(cause, suppressedVisited); + } + + @Override + public boolean equals(final Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (this.getClass() != obj.getClass()) { + return false; + } + final ThrowableProxy other = (ThrowableProxy) obj; + if (this.causeProxy == null) { + if (other.causeProxy != null) { + return false; + } + } else if (!this.causeProxy.equals(other.causeProxy)) { + return false; + } + if (this.commonElementCount != other.commonElementCount) { + return false; + } + if (this.name == null) { + if (other.name != null) { + return false; + } + } else if (!this.name.equals(other.name)) { + return false; + } + if (!Arrays.equals(this.extendedStackTrace, other.extendedStackTrace)) { + return false; + } + if (!Arrays.equals(this.suppressedProxies, other.suppressedProxies)) { + return false; + } + return true; + } + + private void formatCause(final StringBuilder sb, final String prefix, final ThrowableProxy cause, final List ignorePackages) { + formatThrowableProxy(sb, prefix, CAUSED_BY_LABEL, cause, ignorePackages); + } + + private void formatThrowableProxy(final StringBuilder sb, final String prefix, final String causeLabel, + final ThrowableProxy throwableProxy, final List ignorePackages) { + if (throwableProxy == null) { + return; + } + sb.append(prefix).append(causeLabel).append(throwableProxy).append(EOL); + this.formatElements(sb, prefix, throwableProxy.commonElementCount, + throwableProxy.getStackTrace(), throwableProxy.extendedStackTrace, ignorePackages); + this.formatSuppressed(sb, prefix + "\t", throwableProxy.suppressedProxies, ignorePackages); + this.formatCause(sb, prefix, throwableProxy.causeProxy, ignorePackages); + } + + private void formatSuppressed(final StringBuilder sb, final String prefix, final ThrowableProxy[] suppressedProxies, + final List ignorePackages) { + if (suppressedProxies == null) { + return; + } + for (final ThrowableProxy suppressedProxy : suppressedProxies) { + final ThrowableProxy cause = suppressedProxy; + formatThrowableProxy(sb, prefix, SUPPRESSED_LABEL, cause, ignorePackages); + } + } + + private void formatElements(final StringBuilder sb, final String prefix, final int commonCount, + final StackTraceElement[] causedTrace, final ExtendedStackTraceElement[] extStackTrace, + final List ignorePackages) { + if (ignorePackages == null || ignorePackages.isEmpty()) { + for (final ExtendedStackTraceElement element : extStackTrace) { + this.formatEntry(element, sb, prefix); + } + } else { + int count = 0; + for (int i = 0; i < extStackTrace.length; ++i) { + if (!this.ignoreElement(causedTrace[i], ignorePackages)) { + if (count > 0) { + appendSuppressedCount(sb, prefix, count); + count = 0; + } + this.formatEntry(extStackTrace[i], sb, prefix); + } else { + ++count; + } + } + if (count > 0) { + appendSuppressedCount(sb, prefix, count); + } + } + if (commonCount != 0) { + sb.append(prefix).append("\t... ").append(commonCount).append(" more").append(EOL); + } + } + + private void appendSuppressedCount(final StringBuilder sb, final String prefix, final int count) { + sb.append(prefix); + if (count == 1) { + sb.append("\t....").append(EOL); + } else { + sb.append("\t... 
suppressed ").append(count).append(" lines").append(EOL); + } + } + + private void formatEntry(final ExtendedStackTraceElement extStackTraceElement, final StringBuilder sb, final String prefix) { + sb.append(prefix); + sb.append("\tat "); + sb.append(extStackTraceElement); + sb.append(EOL); + } + + /** + * Formats the specified Throwable. + * + * @param sb + * StringBuilder to contain the formatted Throwable. + * @param cause + * The Throwable to format. + */ + public void formatWrapper(final StringBuilder sb, final ThrowableProxy cause) { + this.formatWrapper(sb, cause, null); + } + + /** + * Formats the specified Throwable. + * + * @param sb + * StringBuilder to contain the formatted Throwable. + * @param cause + * The Throwable to format. + * @param packages + * The List of packages to be suppressed from the trace. + */ + @SuppressWarnings("ThrowableResultOfMethodCallIgnored") + public void formatWrapper(final StringBuilder sb, final ThrowableProxy cause, final List packages) { + final Throwable caused = cause.getCauseProxy() != null ? cause.getCauseProxy().getThrowable() : null; + if (caused != null) { + this.formatWrapper(sb, cause.causeProxy); + sb.append(WRAPPED_BY_LABEL); + } + sb.append(cause).append(EOL); + this.formatElements(sb, "", cause.commonElementCount, + cause.getThrowable().getStackTrace(), cause.extendedStackTrace, packages); + } + + public ThrowableProxy getCauseProxy() { + return this.causeProxy; + } + + /** + * Format the Throwable that is the cause of this Throwable. + * + * @return The formatted Throwable that caused this Throwable. + */ + public String getCauseStackTraceAsString() { + return this.getCauseStackTraceAsString(null); + } + + /** + * Format the Throwable that is the cause of this Throwable. + * + * @param packages + * The List of packages to be suppressed from the trace. + * @return The formatted Throwable that caused this Throwable. + */ + public String getCauseStackTraceAsString(final List packages) { + final StringBuilder sb = new StringBuilder(); + if (this.causeProxy != null) { + this.formatWrapper(sb, this.causeProxy); + sb.append(WRAPPED_BY_LABEL); + } + sb.append(this.toString()); + sb.append(EOL); + this.formatElements(sb, "", 0, this.throwable.getStackTrace(), this.extendedStackTrace, packages); + return sb.toString(); + } + + /** + * Return the number of elements that are being omitted because they are common with the parent Throwable's stack + * trace. + * + * @return The number of elements omitted from the stack trace. + */ + public int getCommonElementCount() { + return this.commonElementCount; + } + + /** + * Gets the stack trace including packaging information. + * + * @return The stack trace including packaging information. + */ + public ExtendedStackTraceElement[] getExtendedStackTrace() { + return this.extendedStackTrace; + } + + /** + * Format the stack trace including packaging information. + * + * @return The formatted stack trace including packaging information. + */ + public String getExtendedStackTraceAsString() { + return this.getExtendedStackTraceAsString(null); + } + + /** + * Format the stack trace including packaging information. + * + * @param ignorePackages + * List of packages to be ignored in the trace. + * @return The formatted stack trace including packaging information. 
+ */ + public String getExtendedStackTraceAsString(final List ignorePackages) { + final StringBuilder sb = new StringBuilder(this.name); + final String msg = this.message; + if (msg != null) { + sb.append(": ").append(msg); + } + sb.append(EOL); + final StackTraceElement[] causedTrace = this.throwable != null ? this.throwable.getStackTrace() : null; + this.formatElements(sb, "", 0, causedTrace, this.extendedStackTrace, ignorePackages); + this.formatSuppressed(sb, "\t", this.suppressedProxies, ignorePackages); + this.formatCause(sb, "", this.causeProxy, ignorePackages); + return sb.toString(); + } + + public String getLocalizedMessage() { + return this.localizedMessage; + } + + public String getMessage() { + return this.message; + } + + /** + * Return the FQCN of the Throwable. + * + * @return The FQCN of the Throwable. + */ + public String getName() { + return this.name; + } + + public StackTraceElement[] getStackTrace() { + return this.throwable == null ? null : this.throwable.getStackTrace(); + } + + /** + * Gets proxies for suppressed exceptions. + * + * @return proxies for suppressed exceptions. + */ + public ThrowableProxy[] getSuppressedProxies() { + return this.suppressedProxies; + } + + /** + * Format the suppressed Throwables. + * + * @return The formatted suppressed Throwables. + */ + public String getSuppressedStackTrace() { + final ThrowableProxy[] suppressed = this.getSuppressedProxies(); + if (suppressed == null || suppressed.length == 0) { + return Strings.EMPTY; + } + final StringBuilder sb = new StringBuilder("Suppressed Stack Trace Elements:").append(EOL); + for (final ThrowableProxy proxy : suppressed) { + sb.append(proxy.getExtendedStackTraceAsString()); + } + return sb.toString(); + } + + /** + * The throwable or null if this object is deserialized from XML or JSON. + * + * @return The throwable or null if this object is deserialized from XML or JSON. + */ + public Throwable getThrowable() { + return this.throwable; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + (this.causeProxy == null ? 0 : this.causeProxy.hashCode()); + result = prime * result + this.commonElementCount; + result = prime * result + (this.extendedStackTrace == null ? 0 : Arrays.hashCode(this.extendedStackTrace)); + result = prime * result + (this.suppressedProxies == null ? 0 : Arrays.hashCode(this.suppressedProxies)); + result = prime * result + (this.name == null ? 0 : this.name.hashCode()); + return result; + } + + private boolean ignoreElement(final StackTraceElement element, final List ignorePackages) { + final String className = element.getClassName(); + for (final String pkg : ignorePackages) { + if (className.startsWith(pkg)) { + return true; + } + } + return false; + } + + /** + * Loads classes not located via Reflection.getCallerClass. + * + * @param lastLoader + * The ClassLoader that loaded the Class that called this Class. + * @param className + * The name of the Class. + * @return The Class object for the Class or null if it could not be located. + */ + private Class loadClass(final ClassLoader lastLoader, final String className) { + // XXX: this is overly complicated + Class clazz; + if (lastLoader != null) { + try { + clazz = Loader.initializeClass(className, lastLoader); + if (clazz != null) { + return clazz; + } + } catch (final Throwable ignore) { + // Ignore exception. 
+ } + } + try { + clazz = Loader.loadClass(className); + } catch (final ClassNotFoundException ignored) { + return initializeClass(className); + } catch (final NoClassDefFoundError ignored) { + return initializeClass(className); + } catch (final SecurityException ignored) { + return initializeClass(className); + } + return clazz; + } + + private Class initializeClass(final String className) { + try { + return Loader.initializeClass(className, this.getClass().getClassLoader()); + } catch (final ClassNotFoundException ignore) { + return null; + } catch (final NoClassDefFoundError ignore) { + return null; + } catch (final SecurityException ignore) { + return null; + } + } + + /** + * Construct the CacheEntry from the Class's information. + * + * @param stackTraceElement + * The stack trace element + * @param callerClass + * The Class. + * @param exact + * True if the class was obtained via Reflection.getCallerClass. + * + * @return The CacheEntry. + */ + private CacheEntry toCacheEntry(final StackTraceElement stackTraceElement, final Class callerClass, + final boolean exact) { + String location = "?"; + String version = "?"; + ClassLoader lastLoader = null; + if (callerClass != null) { + try { + final CodeSource source = callerClass.getProtectionDomain().getCodeSource(); + if (source != null) { + final URL locationURL = source.getLocation(); + if (locationURL != null) { + final String str = locationURL.toString().replace('\\', '/'); + int index = str.lastIndexOf("/"); + if (index >= 0 && index == str.length() - 1) { + index = str.lastIndexOf("/", index - 1); + location = str.substring(index + 1); + } else { + location = str.substring(index + 1); + } + } + } + } catch (final Exception ex) { + // Ignore the exception. + } + final Package pkg = callerClass.getPackage(); + if (pkg != null) { + final String ver = pkg.getImplementationVersion(); + if (ver != null) { + version = ver; + } + } + lastLoader = callerClass.getClassLoader(); + } + return new CacheEntry(new ExtendedClassInfo(exact, location, version), lastLoader); + } + + /** + * Resolve all the stack entries in this stack trace that are not common with the parent. + * + * @param stack + * The callers Class stack. + * @param map + * The cache of CacheEntry objects. + * @param rootTrace + * The first stack trace resolve or null. + * @param stackTrace + * The stack trace being resolved. + * @return The StackTracePackageElement array. + */ + ExtendedStackTraceElement[] toExtendedStackTrace(final Stack> stack, final Map map, + final StackTraceElement[] rootTrace, final StackTraceElement[] stackTrace) { + int stackLength; + if (rootTrace != null) { + int rootIndex = rootTrace.length - 1; + int stackIndex = stackTrace.length - 1; + while (rootIndex >= 0 && stackIndex >= 0 && rootTrace[rootIndex].equals(stackTrace[stackIndex])) { + --rootIndex; + --stackIndex; + } + this.commonElementCount = stackTrace.length - 1 - stackIndex; + stackLength = stackIndex + 1; + } else { + this.commonElementCount = 0; + stackLength = stackTrace.length; + } + final ExtendedStackTraceElement[] extStackTrace = new ExtendedStackTraceElement[stackLength]; + Class clazz = stack.isEmpty() ? null : stack.peek(); + ClassLoader lastLoader = null; + for (int i = stackLength - 1; i >= 0; --i) { + final StackTraceElement stackTraceElement = stackTrace[i]; + final String className = stackTraceElement.getClassName(); + // The stack returned from getCurrentStack may be missing entries for java.lang.reflect.Method.invoke() + // and its implementation. 
The Throwable might also contain stack entries that are no longer + // present as those methods have returned. + ExtendedClassInfo extClassInfo; + if (clazz != null && className.equals(clazz.getName())) { + final CacheEntry entry = this.toCacheEntry(stackTraceElement, clazz, true); + extClassInfo = entry.element; + lastLoader = entry.loader; + stack.pop(); + clazz = stack.isEmpty() ? null : stack.peek(); + } else { + final CacheEntry cacheEntry = map.get(className); + if (cacheEntry != null) { + final CacheEntry entry = cacheEntry; + extClassInfo = entry.element; + if (entry.loader != null) { + lastLoader = entry.loader; + } + } else { + final CacheEntry entry = this.toCacheEntry(stackTraceElement, + this.loadClass(lastLoader, className), false); + extClassInfo = entry.element; + map.put(stackTraceElement.toString(), entry); + if (entry.loader != null) { + lastLoader = entry.loader; + } + } + } + extStackTrace[i] = new ExtendedStackTraceElement(stackTraceElement, extClassInfo); + } + return extStackTrace; + } + + @Override + public String toString() { + final String msg = this.message; + return msg != null ? this.name + ": " + msg : this.name; + } + + private ThrowableProxy[] toSuppressedProxies(final Throwable thrown, Set suppressedVisited) { + try { + final Throwable[] suppressed = thrown.getSuppressed(); + if (suppressed == null) { + return EMPTY_THROWABLE_PROXY_ARRAY; + } + final List proxies = new ArrayList<>(suppressed.length); + if (suppressedVisited == null) { + suppressedVisited = new HashSet<>(proxies.size()); + } + for (int i = 0; i < suppressed.length; i++) { + final Throwable candidate = suppressed[i]; + if (!suppressedVisited.contains(candidate)) { + suppressedVisited.add(candidate); + proxies.add(new ThrowableProxy(candidate, suppressedVisited)); + } + } + return proxies.toArray(new ThrowableProxy[proxies.size()]); + } catch (final Exception e) { + StatusLogger.getLogger().error(e); + } + return null; + } +} diff --git a/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java b/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java index 9bbe08208d7..bcf0a2b201a 100644 --- a/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java +++ b/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java @@ -102,7 +102,6 @@ public class MapperQueryParser extends QueryParser { setLowercaseExpandedTerms(settings.lowercaseExpandedTerms()); setPhraseSlop(settings.phraseSlop()); setDefaultOperator(settings.defaultOperator()); - setFuzzyMinSim(settings.fuzziness().asFloat()); setFuzzyPrefixLength(settings.fuzzyPrefixLength()); setLocale(settings.locale()); } @@ -114,7 +113,7 @@ public class MapperQueryParser extends QueryParser { @Override Query handleBareFuzzy(String qfield, Token fuzzySlop, String termImage) throws ParseException { if (fuzzySlop.image.length() == 1) { - return getFuzzyQuery(qfield, termImage, Float.toString(fuzzyMinSim)); + return getFuzzyQuery(qfield, termImage, Float.toString(settings.fuzziness().asDistance(termImage))); } return getFuzzyQuery(qfield, termImage, fuzzySlop.image.substring(1)); } diff --git a/core/src/main/java/org/elasticsearch/ElasticsearchException.java b/core/src/main/java/org/elasticsearch/ElasticsearchException.java index 7830d521ff5..750f133ea17 100644 --- a/core/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/core/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -19,6 +19,7 @@ package org.elasticsearch; +import 
org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.support.replication.ReplicationOperation; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.common.io.stream.StreamInput; diff --git a/core/src/main/java/org/elasticsearch/ExceptionsHelper.java b/core/src/main/java/org/elasticsearch/ExceptionsHelper.java index 772daab2c75..c30662a0934 100644 --- a/core/src/main/java/org/elasticsearch/ExceptionsHelper.java +++ b/core/src/main/java/org/elasticsearch/ExceptionsHelper.java @@ -19,12 +19,12 @@ package org.elasticsearch; +import org.apache.logging.log4j.Logger; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexFormatTooNewException; import org.apache.lucene.index.IndexFormatTooOldException; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.index.Index; import org.elasticsearch.rest.RestStatus; @@ -39,7 +39,7 @@ import java.util.Set; public final class ExceptionsHelper { - private static final ESLogger logger = Loggers.getLogger(ExceptionsHelper.class); + private static final Logger logger = Loggers.getLogger(ExceptionsHelper.class); public static RuntimeException convertToRuntime(Exception e) { if (e instanceof RuntimeException) { diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java index 0d707e035c7..3b4282c9b56 100644 --- a/core/src/main/java/org/elasticsearch/Version.java +++ b/core/src/main/java/org/elasticsearch/Version.java @@ -73,6 +73,8 @@ public class Version { public static final Version V_2_3_4 = new Version(V_2_3_4_ID, org.apache.lucene.util.Version.LUCENE_5_5_0); public static final int V_2_3_5_ID = 2030599; public static final Version V_2_3_5 = new Version(V_2_3_5_ID, org.apache.lucene.util.Version.LUCENE_5_5_0); + public static final int V_2_4_0_ID = 2040099; + public static final Version V_2_4_0 = new Version(V_2_4_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_2); public static final int V_5_0_0_alpha1_ID = 5000001; public static final Version V_5_0_0_alpha1 = new Version(V_5_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_0_0); public static final int V_5_0_0_alpha2_ID = 5000002; @@ -110,6 +112,8 @@ public class Version { return V_5_0_0_alpha2; case V_5_0_0_alpha1_ID: return V_5_0_0_alpha1; + case V_2_4_0_ID: + return V_2_4_0; case V_2_3_5_ID: return V_2_3_5; case V_2_3_4_ID: diff --git a/core/src/main/java/org/elasticsearch/action/ActionModule.java b/core/src/main/java/org/elasticsearch/action/ActionModule.java index ca5349661c5..1be1ddda9a4 100644 --- a/core/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/core/src/main/java/org/elasticsearch/action/ActionModule.java @@ -20,7 +20,6 @@ package org.elasticsearch.action; import java.util.ArrayList; -import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -335,7 +334,7 @@ public class ActionModule extends AbstractModule { this.actionPlugins = actionPlugins; actions = setupActions(actionPlugins); actionFilters = setupActionFilters(actionPlugins, ingestEnabled); - autoCreateIndex = transportClient ? null : new AutoCreateIndex(settings, resolver); + autoCreateIndex = transportClient ? 
null : new AutoCreateIndex(settings, clusterSettings, resolver); destructiveOperations = new DestructiveOperations(settings, clusterSettings); Set headers = actionPlugins.stream().flatMap(p -> p.getRestHeaders().stream()).collect(Collectors.toSet()); restController = new RestController(settings, headers); diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java index f07048f8d51..93140794240 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -19,6 +19,8 @@ package org.elasticsearch.action.admin.cluster.health; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ActiveShardCount; @@ -106,7 +108,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction< @Override public void onFailure(String source, Exception e) { - logger.error("unexpected failure during [{}]", e, source); + logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); listener.onFailure(e); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java index d7ce899792f..dd243cf6d88 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java @@ -37,10 +37,6 @@ import org.elasticsearch.threadpool.ThreadPoolInfo; import org.elasticsearch.transport.TransportInfo; import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - -import static java.util.Collections.unmodifiableMap; /** * Node information (static, does not change over time). 
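
The TransportClusterHealthAction hunk above shows the call-site pattern that recurs through the rest of this diff: the old ESLogger overloads took (message, throwable, params...), while the migrated sites pass the exception as the last argument and defer message construction behind a lazily evaluated Supplier. The same shape in isolation, as a sketch:

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

class FailureLogging {
    static void onFailure(Logger logger, String source, Exception e) {
        // before: logger.error("unexpected failure during [{}]", e, source);
        logger.error((Supplier<?>) () -> new ParameterizedMessage(
                "unexpected failure during [{}]", source), e);
    }
}
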
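
The NodeInfo hunk just below collapses eight hand-rolled null-flag blocks into readOptionalWriteable and writeOptionalWriteable calls. A paraphrased sketch of the protocol those helpers implement, a boolean presence flag followed by the value, assuming Elasticsearch's StreamInput, StreamOutput, and Writeable types are in scope (this is not the StreamInput source):

static <T extends Writeable> void writeOptional(StreamOutput out, T value) throws IOException {
    out.writeBoolean(value != null);   // presence flag
    if (value != null) {
        value.writeTo(out);            // then the payload itself
    }
}

static <T> T readOptional(StreamInput in, Writeable.Reader<T> reader) throws IOException {
    // constructor references such as OsInfo::new serve as the Writeable.Reader
    return in.readBoolean() ? reader.read(in) : null;
}
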
@@ -85,8 +81,8 @@ public class NodeInfo extends BaseNodeResponse { public NodeInfo(Version version, Build build, DiscoveryNode node, @Nullable Settings settings, @Nullable OsInfo os, @Nullable ProcessInfo process, @Nullable JvmInfo jvm, @Nullable ThreadPoolInfo threadPool, - @Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsAndModules plugins, @Nullable IngestInfo ingest, - @Nullable ByteSizeValue totalIndexingBuffer) { + @Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsAndModules plugins, + @Nullable IngestInfo ingest, @Nullable ByteSizeValue totalIndexingBuffer) { super(node); this.version = version; this.build = build; @@ -205,31 +201,14 @@ public class NodeInfo extends BaseNodeResponse { if (in.readBoolean()) { settings = Settings.readSettingsFromStream(in); } - if (in.readBoolean()) { - os = OsInfo.readOsInfo(in); - } - if (in.readBoolean()) { - process = ProcessInfo.readProcessInfo(in); - } - if (in.readBoolean()) { - jvm = JvmInfo.readJvmInfo(in); - } - if (in.readBoolean()) { - threadPool = ThreadPoolInfo.readThreadPoolInfo(in); - } - if (in.readBoolean()) { - transport = TransportInfo.readTransportInfo(in); - } - if (in.readBoolean()) { - http = HttpInfo.readHttpInfo(in); - } - if (in.readBoolean()) { - plugins = new PluginsAndModules(); - plugins.readFrom(in); - } - if (in.readBoolean()) { - ingest = new IngestInfo(in); - } + os = in.readOptionalWriteable(OsInfo::new); + process = in.readOptionalWriteable(ProcessInfo::new); + jvm = in.readOptionalWriteable(JvmInfo::new); + threadPool = in.readOptionalWriteable(ThreadPoolInfo::new); + transport = in.readOptionalWriteable(TransportInfo::new); + http = in.readOptionalWriteable(HttpInfo::new); + plugins = in.readOptionalWriteable(PluginsAndModules::new); + ingest = in.readOptionalWriteable(IngestInfo::new); } @Override @@ -249,53 +228,13 @@ public class NodeInfo extends BaseNodeResponse { out.writeBoolean(true); Settings.writeSettingsToStream(settings, out); } - if (os == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - os.writeTo(out); - } - if (process == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - process.writeTo(out); - } - if (jvm == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - jvm.writeTo(out); - } - if (threadPool == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - threadPool.writeTo(out); - } - if (transport == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - transport.writeTo(out); - } - if (http == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - http.writeTo(out); - } - if (plugins == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - plugins.writeTo(out); - } - if (ingest == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - ingest.writeTo(out); - } + out.writeOptionalWriteable(os); + out.writeOptionalWriteable(process); + out.writeOptionalWriteable(jvm); + out.writeOptionalWriteable(threadPool); + out.writeOptionalWriteable(transport); + out.writeOptionalWriteable(http); + out.writeOptionalWriteable(plugins); + out.writeOptionalWriteable(ingest); } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginsAndModules.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginsAndModules.java index 3831fd24f3e..206dd262ed8 100644 --- 
a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginsAndModules.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginsAndModules.java @@ -21,7 +21,7 @@ package org.elasticsearch.action.admin.cluster.node.info; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.plugins.PluginInfo; @@ -34,13 +34,24 @@ import java.util.List; /** * Information about plugins and modules */ -public class PluginsAndModules implements Streamable, ToXContent { - private List plugins; - private List modules; +public class PluginsAndModules implements Writeable, ToXContent { + private final List plugins; + private final List modules; - public PluginsAndModules() { - plugins = new ArrayList<>(); - modules = new ArrayList<>(); + public PluginsAndModules(List plugins, List modules) { + this.plugins = Collections.unmodifiableList(plugins); + this.modules = Collections.unmodifiableList(modules); + } + + public PluginsAndModules(StreamInput in) throws IOException { + this.plugins = Collections.unmodifiableList(in.readList(PluginInfo::new)); + this.modules = Collections.unmodifiableList(in.readList(PluginInfo::new)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeList(plugins); + out.writeList(modules); } /** @@ -69,33 +80,6 @@ public class PluginsAndModules implements Streamable, ToXContent { modules.add(info); } - @Override - public void readFrom(StreamInput in) throws IOException { - if (plugins.isEmpty() == false || modules.isEmpty() == false) { - throw new IllegalStateException("instance is already populated"); - } - int plugins_size = in.readInt(); - for (int i = 0; i < plugins_size; i++) { - plugins.add(PluginInfo.readFromStream(in)); - } - int modules_size = in.readInt(); - for (int i = 0; i < modules_size; i++) { - modules.add(PluginInfo.readFromStream(in)); - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeInt(plugins.size()); - for (PluginInfo plugin : getPluginInfos()) { - plugin.writeTo(out); - } - out.writeInt(modules.size()); - for (PluginInfo module : getModuleInfos()) { - module.writeTo(out); - } - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startArray("plugins"); diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java index 6d50925089b..0a750e571e3 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java @@ -212,7 +212,7 @@ public class NodeStats extends BaseNodeResponse implements ToXContent { indices = NodeIndicesStats.readIndicesStats(in); } if (in.readBoolean()) { - os = OsStats.readOsStats(in); + os = new OsStats(in); } if (in.readBoolean()) { process = ProcessStats.readProcessStats(in); diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java index 875562ad64a..fbfe3ad3713 100644 
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java @@ -19,6 +19,9 @@ package org.elasticsearch.action.admin.cluster.reroute; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; @@ -33,7 +36,6 @@ import org.elasticsearch.cluster.routing.allocation.RoutingExplanations; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -77,13 +79,13 @@ public class TransportClusterRerouteAction extends TransportMasterNodeAction listener; - private final ESLogger logger; + private final Logger logger; private final AllocationService allocationService; private volatile ClusterState clusterStateToSend; private volatile RoutingExplanations explanations; - ClusterRerouteResponseAckedClusterStateUpdateTask(ESLogger logger, AllocationService allocationService, ClusterRerouteRequest request, - ActionListener listener) { + ClusterRerouteResponseAckedClusterStateUpdateTask(Logger logger, AllocationService allocationService, ClusterRerouteRequest request, + ActionListener listener) { super(Priority.IMMEDIATE, request, listener); this.request = request; this.listener = listener; @@ -103,7 +105,7 @@ public class TransportClusterRerouteAction extends TransportMasterNodeAction) () -> new ParameterizedMessage("failed to perform [{}]", source), e); super.onFailure(source, e); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java index 4464b5d793f..2907716eaae 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java @@ -19,6 +19,8 @@ package org.elasticsearch.action.admin.cluster.settings; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; @@ -148,7 +150,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct @Override public void onFailure(String source, Exception e) { //if the reroute fails we only log - logger.debug("failed to perform [{}]", e, source); + logger.debug((Supplier) () -> new ParameterizedMessage("failed to perform [{}]", source), e); listener.onFailure(new ElasticsearchException("reroute after update settings failed", e)); } @@ -166,7 +168,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct @Override public void onFailure(String source, Exception e) { - logger.debug("failed to perform [{}]", e, source); + logger.debug((Supplier) () -> new 
ParameterizedMessage("failed to perform [{}]", source), e); super.onFailure(source, e); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java index 71ec050b481..4c475229ae5 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java @@ -87,7 +87,7 @@ public class ClusterStatsNodes implements ToXContent { } } this.counts = new Counts(nodeInfos); - this.os = new OsStats(nodeInfos); + this.os = new OsStats(nodeInfos, nodeStats); this.process = new ProcessStats(nodeStats); this.jvm = new JvmStats(nodeInfos, nodeStats); this.networkTypes = new NetworkTypes(nodeInfos); @@ -226,11 +226,12 @@ public class ClusterStatsNodes implements ToXContent { final int availableProcessors; final int allocatedProcessors; final ObjectIntHashMap names; + final org.elasticsearch.monitor.os.OsStats.Mem mem; /** * Build the stats from information about each node. */ - private OsStats(List nodeInfos) { + private OsStats(List nodeInfos, List nodeStatsList) { this.names = new ObjectIntHashMap<>(); int availableProcessors = 0; int allocatedProcessors = 0; @@ -244,6 +245,22 @@ public class ClusterStatsNodes implements ToXContent { } this.availableProcessors = availableProcessors; this.allocatedProcessors = allocatedProcessors; + + long totalMemory = 0; + long freeMemory = 0; + for (NodeStats nodeStats : nodeStatsList) { + if (nodeStats.getOs() != null) { + long total = nodeStats.getOs().getMem().getTotal().bytes(); + if (total > 0) { + totalMemory += total; + } + long free = nodeStats.getOs().getMem().getFree().bytes(); + if (free > 0) { + freeMemory += free; + } + } + } + this.mem = new org.elasticsearch.monitor.os.OsStats.Mem(totalMemory, freeMemory); } public int getAvailableProcessors() { @@ -254,6 +271,10 @@ public class ClusterStatsNodes implements ToXContent { return allocatedProcessors; } + public org.elasticsearch.monitor.os.OsStats.Mem getMem() { + return mem; + } + static final class Fields { static final String AVAILABLE_PROCESSORS = "available_processors"; static final String ALLOCATED_PROCESSORS = "allocated_processors"; @@ -274,6 +295,7 @@ public class ClusterStatsNodes implements ToXContent { builder.endObject(); } builder.endArray(); + mem.toXContent(builder, params); return builder; } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java index 72287c84662..3eb73273832 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -92,7 +92,7 @@ public class TransportClusterStatsAction extends TransportNodesAction shardsStats = new ArrayList<>(); for (IndexService indexService : indicesService) { for (IndexShard indexShard : indexService) { diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java index 06810c4dcd1..d33f37defec 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java +++ 
b/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java @@ -19,6 +19,8 @@ package org.elasticsearch.action.admin.indices.close; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DestructiveOperations; @@ -108,7 +110,7 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction) () -> new ParameterizedMessage("failed to close indices [{}]", (Object) concreteIndices), t); listener.onFailure(t); } }); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java index 947936bddc7..251eed8bdb8 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java @@ -19,6 +19,8 @@ package org.elasticsearch.action.admin.indices.delete; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DestructiveOperations; @@ -100,7 +102,7 @@ public class TransportDeleteIndexAction extends TransportMasterNodeAction) () -> new ParameterizedMessage("failed to delete indices [{}]", concreteIndices), t); listener.onFailure(t); } }); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java index a03472262e7..d9ebf88fda6 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java @@ -19,6 +19,8 @@ package org.elasticsearch.action.admin.indices.mapping.put; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; @@ -92,12 +94,12 @@ public class TransportPutMappingAction extends TransportMasterNodeAction) () -> new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", concreteIndices, request.type()), t); listener.onFailure(t); } }); } catch (IndexNotFoundException ex) { - logger.debug("failed to put mappings on indices [{}], type [{}]", ex, request.indices(), request.type()); + logger.debug((Supplier) () -> new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", request.indices(), request.type()), ex); throw ex; } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java index d672d55b322..1128ebf9875 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java @@ -19,6 +19,8 @@ package 
org.elasticsearch.action.admin.indices.open; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DestructiveOperations; @@ -93,7 +95,7 @@ public class TransportOpenIndexAction extends TransportMasterNodeAction) () -> new ParameterizedMessage("failed to open indices [{}]", (Object) concreteIndices), t); listener.onFailure(t); } }); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java index 5655400465f..f9ebff06636 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java @@ -19,6 +19,8 @@ package org.elasticsearch.action.admin.indices.settings.put; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; @@ -92,7 +94,7 @@ public class TransportUpdateSettingsAction extends TransportMasterNodeAction) () -> new ParameterizedMessage("failed to update settings on indices [{}]", (Object) concreteIndices), t); listener.onFailure(t); } }); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java index 2de65e090e8..e13578d66de 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.action.admin.indices.shards; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; @@ -41,7 +42,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenIntMap; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.gateway.AsyncShardFetch; @@ -150,7 +150,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc private class InternalAsyncFetch extends AsyncShardFetch { - InternalAsyncFetch(ESLogger logger, String type, ShardId shardId, TransportNodesListGatewayStartedShards action) { + InternalAsyncFetch(Logger logger, String type, ShardId shardId, TransportNodesListGatewayStartedShards action) { super(logger, type, shardId, action); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java index 33addcb8440..bb18b57fa93 100644 --- 
a/core/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.action.admin.indices.template.delete; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; @@ -73,7 +75,7 @@ public class TransportDeleteIndexTemplateAction extends TransportMasterNodeActio @Override public void onFailure(Exception e) { - logger.debug("failed to delete templates [{}]", e, request.name()); + logger.debug((Supplier) () -> new ParameterizedMessage("failed to delete templates [{}]", request.name()), e); listener.onFailure(e); } }); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java index 0d14c4d24df..64a01b90ac4 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.action.admin.indices.template.put; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; @@ -94,7 +96,7 @@ public class TransportPutIndexTemplateAction extends TransportMasterNodeAction
@Override public void onFailure(Exception e) { - logger.debug("failed to put template [{}]", e, request.name()); + logger.debug((Supplier
) () -> new ParameterizedMessage("failed to put template [{}]", request.name()), e); listener.onFailure(e); } }); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java index 3b77892086b..f467c6ae749 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java @@ -19,6 +19,8 @@ package org.elasticsearch.action.admin.indices.upgrade.post; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; @@ -79,7 +81,7 @@ public class TransportUpgradeSettingsAction extends TransportMasterNodeAction) () -> new ParameterizedMessage("failed to upgrade minimum compatibility version settings on indices [{}]", request.versions().keySet()), t); listener.onFailure(t); } }); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java index a829e4b0292..6ad566ca500 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java @@ -18,9 +18,11 @@ */ package org.elasticsearch.action.bulk; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.Client; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; @@ -31,7 +33,7 @@ import java.util.concurrent.TimeUnit; * Abstracts the low-level details of bulk request handling */ abstract class BulkRequestHandler { - protected final ESLogger logger; + protected final Logger logger; protected final Client client; protected BulkRequestHandler(Client client) { @@ -76,12 +78,12 @@ abstract class BulkRequestHandler { listener.afterBulk(executionId, bulkRequest, bulkResponse); } catch (InterruptedException e) { Thread.currentThread().interrupt(); - logger.info("Bulk request {} has been cancelled.", e, executionId); + logger.info((Supplier) () -> new ParameterizedMessage("Bulk request {} has been cancelled.", executionId), e); if (!afterCalled) { listener.afterBulk(executionId, bulkRequest, e); } } catch (Exception e) { - logger.warn("Failed to execute bulk request {}.", e, executionId); + logger.warn((Supplier) () -> new ParameterizedMessage("Failed to execute bulk request {}.", executionId), e); if (!afterCalled) { listener.afterBulk(executionId, bulkRequest, e); } @@ -142,10 +144,10 @@ abstract class BulkRequestHandler { bulkRequestSetupSuccessful = true; } catch (InterruptedException e) { Thread.currentThread().interrupt(); - logger.info("Bulk request {} has been cancelled.", e, executionId); + logger.info((Supplier) () -> new ParameterizedMessage("Bulk request {} has been cancelled.", executionId), e); listener.afterBulk(executionId, bulkRequest, e); } catch (Exception e) { - logger.warn("Failed to execute bulk 
request {}.", e, executionId); + logger.warn((Supplier) () -> new ParameterizedMessage("Failed to execute bulk request {}.", executionId), e); listener.afterBulk(executionId, bulkRequest, e); } finally { if (!bulkRequestSetupSuccessful && acquired) { // if we fail on client.bulk() release the semaphore diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java index 2c16bcb5e9c..ffc2407b8a4 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; +import org.elasticsearch.action.support.replication.ReplicationRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.shard.ShardId; @@ -101,4 +102,15 @@ public class BulkShardRequest extends ReplicatedWriteRequest { } return b.toString(); } + + @Override + public void onRetry() { + for (BulkItemRequest item : items) { + if (item.request() instanceof ReplicationRequest) { + // all replication requests need to be notified here as well to ie. make sure that internal optimizations are + // disabled see IndexRequest#canHaveDuplicates() + ((ReplicationRequest) item.request()).onRetry(); + } + } + } } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/Retry.java b/core/src/main/java/org/elasticsearch/action/bulk/Retry.java index 95778785ab9..375796ae801 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/Retry.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/Retry.java @@ -18,12 +18,12 @@ */ package org.elasticsearch.action.bulk; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.Client; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.FutureUtils; @@ -89,7 +89,7 @@ public class Retry { } static class AbstractRetryHandler implements ActionListener { - private final ESLogger logger; + private final Logger logger; private final Client client; private final ActionListener listener; private final Iterator backoff; diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 745449c0a7b..754316f3de0 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -19,6 +19,8 @@ package org.elasticsearch.action.bulk; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionRequest; @@ -30,8 +32,8 @@ import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.index.TransportIndexAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.replication.ReplicationRequest; 
-import org.elasticsearch.action.support.replication.TransportWriteAction; import org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo; +import org.elasticsearch.action.support.replication.TransportWriteAction; import org.elasticsearch.action.update.UpdateHelper; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; @@ -183,9 +185,9 @@ public class TransportShardBulkAction extends TransportWriteAction> void logFailure(Throwable t, String operation, ShardId shardId, ReplicationRequest request) { if (ExceptionsHelper.status(t) == RestStatus.CONFLICT) { - logger.trace("{} failed to execute bulk item ({}) {}", t, shardId, operation, request); + logger.trace((Supplier) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", shardId, operation, request), t); } else { - logger.debug("{} failed to execute bulk item ({}) {}", t, shardId, operation, request); + logger.debug((Supplier) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", shardId, operation, request), t); } } diff --git a/core/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java b/core/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java index d9e5eaa2e1b..ec3d182de49 100644 --- a/core/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java +++ b/core/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java @@ -19,6 +19,8 @@ package org.elasticsearch.action.get; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.TransportActions; @@ -92,7 +94,7 @@ public class TransportShardMultiGetAction extends TransportSingleShardAction) () -> new ParameterizedMessage("{} failed to execute multi_get for [{}]/[{}]", shardId, item.type(), item.id()), e); response.add(request.locations.get(i), new MultiGetResponse.Failure(request.index(), item.type(), item.id(), e)); } } diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java index 63ede68b9fe..1f6bc9108c2 100644 --- a/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -72,7 +72,7 @@ public class IndexRequest extends ReplicatedWriteRequest implement /** * Operation type controls if the type of the index operation. */ - public static enum OpType { + public enum OpType { /** * Index the source. If there an existing document with the id, it will * be replaced. @@ -152,6 +152,17 @@ public class IndexRequest extends ReplicatedWriteRequest implement private String pipeline; + /** + * Value for {@link #getAutoGeneratedTimestamp()} if the document has an external + * provided ID. 
+ */ + public static final int UNSET_AUTO_GENERATED_TIMESTAMP = -1; + + private long autoGeneratedTimestamp = UNSET_AUTO_GENERATED_TIMESTAMP; + + private boolean isRetry = false; + + public IndexRequest() { } @@ -202,6 +213,10 @@ public class IndexRequest extends ReplicatedWriteRequest implement } } + if (opType() != OpType.INDEX && id == null) { + addValidationError("an id is required for a " + opType() + " operation", validationException); + } + if (!versionType.validateVersionForWrites(version)) { validationException = addValidationError("illegal version value [" + version + "] for version type [" + versionType.name() + "]", validationException); } @@ -216,6 +231,11 @@ public class IndexRequest extends ReplicatedWriteRequest implement validationException = addValidationError("id is too long, must be no longer than 512 bytes but was: " + id.getBytes(StandardCharsets.UTF_8).length, validationException); } + + if (id == null && (versionType == VersionType.INTERNAL && version == Versions.MATCH_ANY) == false) { + validationException = addValidationError("an id must be provided if version type or value are set", validationException); + } + return validationException; } @@ -589,10 +609,10 @@ public class IndexRequest extends ReplicatedWriteRequest implement } // generate id if not already provided and id generation is allowed - if (allowIdGeneration) { - if (id == null) { - id(UUIDs.base64UUID()); - } + if (allowIdGeneration && id == null) { + assert autoGeneratedTimestamp == -1; + autoGeneratedTimestamp = Math.max(0, System.currentTimeMillis()); // extra paranoia + id(UUIDs.base64UUID()); } // generate timestamp if not provided, we always have one post this stage... @@ -639,6 +659,8 @@ public class IndexRequest extends ReplicatedWriteRequest implement version = in.readLong(); versionType = VersionType.fromValue(in.readByte()); pipeline = in.readOptionalString(); + isRetry = in.readBoolean(); + autoGeneratedTimestamp = in.readLong(); } @Override @@ -655,6 +677,8 @@ public class IndexRequest extends ReplicatedWriteRequest implement out.writeLong(version); out.writeByte(versionType.getValue()); out.writeOptionalString(pipeline); + out.writeBoolean(isRetry); + out.writeLong(autoGeneratedTimestamp); } @Override @@ -667,4 +691,25 @@ public class IndexRequest extends ReplicatedWriteRequest implement } return "index {[" + index + "][" + type + "][" + id + "], source[" + sSource + "]}"; } + + + /** + * Returns true if this request has been sent to a shard copy more than once. + */ + public boolean isRetry() { + return isRetry; + } + + @Override + public void onRetry() { + isRetry = true; + } + + /** + * Returns the timestamp the auto generated ID was created or {@value #UNSET_AUTO_GENERATED_TIMESTAMP} if the + * document has no auto generated timestamp. This method will return a positive value iff the id was auto generated. + */ + public long getAutoGeneratedTimestamp() { + return autoGeneratedTimestamp; + } } diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java index 20587bf0ea9..c4609e03aa5 100644 --- a/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java @@ -205,15 +205,6 @@ public class IndexRequestBuilder extends ReplicationRequestBuildertrue to force this index to use {@link org.elasticsearch.action.index.IndexRequest.OpType#CREATE}. 
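The IndexRequest changes above thread two new pieces of state through serialization: a timestamp recorded when the id is auto-generated, and an isRetry flag flipped by onRetry() so the engine can disable its append-only optimizations for retried requests (see the comment referencing IndexRequest#canHaveDuplicates() in the BulkShardRequest hunk). A compilable sketch of just that state machine, with invented names; only UNSET_AUTO_GENERATED_TIMESTAMP and onRetry correspond to members in the PR:

// Hypothetical, self-contained illustration; not part of the PR.
public class RetryAwareIndexRequest {
    /** Sentinel meaning "the caller provided the id", mirroring UNSET_AUTO_GENERATED_TIMESTAMP. */
    public static final long UNSET_AUTO_GENERATED_TIMESTAMP = -1;

    private long autoGeneratedTimestamp = UNSET_AUTO_GENERATED_TIMESTAMP;
    private boolean isRetry = false;
    private String id;

    /** Runs once during routing; assigns an id only if the caller did not. */
    public void process() {
        if (id == null) {
            // A non-negative value marks the id as auto-generated: such an id cannot
            // already exist in the index unless this same request was retried.
            autoGeneratedTimestamp = Math.max(0, System.currentTimeMillis());
            id = java.util.UUID.randomUUID().toString(); // stand-in for UUIDs.base64UUID()
        }
    }

    /** Invoked whenever the request is sent to a shard copy more than once. */
    public void onRetry() {
        isRetry = true;
    }

    public boolean isRetry() { return isRetry; }
    public long getAutoGeneratedTimestamp() { return autoGeneratedTimestamp; }
}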
*/ diff --git a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java index c7eef2758cc..cc3fbb7906d 100644 --- a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java @@ -158,7 +158,7 @@ public class TransportIndexAction extends TransportWriteAction executeIndexRequestOnPrimary(IndexRequest request, IndexShard indexShard, diff --git a/core/src/main/java/org/elasticsearch/action/ingest/IngestActionFilter.java b/core/src/main/java/org/elasticsearch/action/ingest/IngestActionFilter.java index cf7d2cf1e54..68983ccfd22 100644 --- a/core/src/main/java/org/elasticsearch/action/ingest/IngestActionFilter.java +++ b/core/src/main/java/org/elasticsearch/action/ingest/IngestActionFilter.java @@ -19,6 +19,8 @@ package org.elasticsearch.action.ingest; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; @@ -90,7 +92,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio void processIndexRequest(Task task, String action, ActionListener listener, ActionFilterChain chain, IndexRequest indexRequest) { executionService.executeIndexRequest(indexRequest, t -> { - logger.error("failed to execute pipeline [{}]", t, indexRequest.getPipeline()); + logger.error((Supplier) () -> new ParameterizedMessage("failed to execute pipeline [{}]", indexRequest.getPipeline()), t); listener.onFailure(t); }, success -> { // TransportIndexAction uses IndexRequest and same action name on the node that receives the request and the node that @@ -105,7 +107,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio long ingestStartTimeInNanos = System.nanoTime(); BulkRequestModifier bulkRequestModifier = new BulkRequestModifier(original); executionService.executeBulkRequest(() -> bulkRequestModifier, (indexRequest, exception) -> { - logger.debug("failed to execute pipeline [{}] for document [{}/{}/{}]", exception, indexRequest.getPipeline(), indexRequest.index(), indexRequest.type(), indexRequest.id()); + logger.debug((Supplier) () -> new ParameterizedMessage("failed to execute pipeline [{}] for document [{}/{}/{}]", indexRequest.getPipeline(), indexRequest.index(), indexRequest.type(), indexRequest.id()), exception); bulkRequestModifier.markCurrentItemAsFailed(exception); }, (exception) -> { if (exception != null) { diff --git a/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index a61384f9595..d86134c7d44 100644 --- a/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -20,8 +20,10 @@ package org.elasticsearch.action.search; import com.carrotsearch.hppc.IntArrayList; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.search.ScoreDoc; -import org.apache.lucene.search.TopDocs; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.NoShardAvailableActionException; import 
org.elasticsearch.action.support.TransportActions; @@ -35,7 +37,6 @@ import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; @@ -46,7 +47,6 @@ import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchTransportRequest; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.query.QuerySearchResultProvider; -import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.threadpool.ThreadPool; import java.util.List; @@ -58,7 +58,7 @@ import static org.elasticsearch.action.search.TransportSearchHelper.internalSear abstract class AbstractSearchAsyncAction extends AbstractAsyncAction { - protected final ESLogger logger; + protected final Logger logger; protected final SearchTransportService searchTransportService; private final IndexNameExpressionResolver indexNameExpressionResolver; protected final SearchPhaseController searchPhaseController; @@ -77,7 +77,7 @@ abstract class AbstractSearchAsyncAction private final Object shardFailuresMutex = new Object(); protected volatile ScoreDoc[] sortedShardDocs; - protected AbstractSearchAsyncAction(ESLogger logger, SearchTransportService searchTransportService, ClusterService clusterService, + protected AbstractSearchAsyncAction(Logger logger, SearchTransportService searchTransportService, ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, SearchPhaseController searchPhaseController, ThreadPool threadPool, SearchRequest request, ActionListener listener) { @@ -191,7 +191,12 @@ abstract class AbstractSearchAsyncAction innerMoveToSecondPhase(); } catch (Exception e) { if (logger.isDebugEnabled()) { - logger.debug("{}: Failed to execute [{}] while moving to second phase", e, shardIt.shardId(), request); + logger.debug( + (Supplier) () -> new ParameterizedMessage( + "{}: Failed to execute [{}] while moving to second phase", + shardIt.shardId(), + request), + e); } raiseEarlyFailure(new ReduceSearchPhaseException(firstPhaseName(), "", e, buildShardFailures())); } @@ -211,15 +216,21 @@ abstract class AbstractSearchAsyncAction if (totalOps.incrementAndGet() == expectedTotalOps) { if (logger.isDebugEnabled()) { if (e != null && !TransportActions.isShardNotAvailableException(e)) { - logger.debug("{}: Failed to execute [{}]", e, shard != null ? shard.shortSummary() : shardIt.shardId(), request); + logger.debug( + (Supplier) () -> new ParameterizedMessage( + "{}: Failed to execute [{}]", + shard != null ? 
shard.shortSummary() : + shardIt.shardId(), + request), + e); } else if (logger.isTraceEnabled()) { - logger.trace("{}: Failed to execute [{}]", e, shard, request); + logger.trace((Supplier) () -> new ParameterizedMessage("{}: Failed to execute [{}]", shard, request), e); } } final ShardSearchFailure[] shardSearchFailures = buildShardFailures(); if (successfulOps.get() == 0) { if (logger.isDebugEnabled()) { - logger.debug("All shards failed for phase: [{}]", e, firstPhaseName()); + logger.debug((Supplier) () -> new ParameterizedMessage("All shards failed for phase: [{}]", firstPhaseName()), e); } // no successful ops, raise an exception @@ -236,10 +247,13 @@ abstract class AbstractSearchAsyncAction final ShardRouting nextShard = shardIt.nextOrNull(); final boolean lastShard = nextShard == null; // trace log this exception - if (logger.isTraceEnabled()) { - logger.trace("{}: Failed to execute [{}] lastShard [{}]", e, shard != null ? shard.shortSummary() : shardIt.shardId(), - request, lastShard); - } + logger.trace( + (Supplier) () -> new ParameterizedMessage( + "{}: Failed to execute [{}] lastShard [{}]", + shard != null ? shard.shortSummary() : shardIt.shardId(), + request, + lastShard), + e); if (!lastShard) { try { performFirstPhase(shardIndex, shardIt, nextShard); @@ -251,8 +265,14 @@ abstract class AbstractSearchAsyncAction // no more shards active, add a failure if (logger.isDebugEnabled() && !logger.isTraceEnabled()) { // do not double log this exception if (e != null && !TransportActions.isShardNotAvailableException(e)) { - logger.debug("{}: Failed to execute [{}] lastShard [{}]", e, - shard != null ? shard.shortSummary() : shardIt.shardId(), request, lastShard); + logger.debug( + (Supplier) () -> new ParameterizedMessage( + "{}: Failed to execute [{}] lastShard [{}]", + shard != null ? 
shard.shortSummary() : + shardIt.shardId(), + request, + lastShard), + e); } } } diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java index 8614d7b1188..367832afab3 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java @@ -19,12 +19,14 @@ package org.elasticsearch.action.search; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.action.SearchTransportService; import org.elasticsearch.search.controller.SearchPhaseController; @@ -43,7 +45,7 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction queryFetchResults; - SearchDfsQueryAndFetchAsyncAction(ESLogger logger, SearchTransportService searchTransportService, + SearchDfsQueryAndFetchAsyncAction(Logger logger, SearchTransportService searchTransportService, ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, SearchPhaseController searchPhaseController, ThreadPool threadPool, SearchRequest request, ActionListener listener) { @@ -105,7 +107,7 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction) () -> new ParameterizedMessage("[{}] Failed to execute query phase", querySearchRequest.id()), e); } this.addShardFailure(shardIndex, dfsResult.shardTarget(), e); successfulOps.decrementAndGet(); diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java index 9d8305cf6b1..7ceefb1998c 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java @@ -20,13 +20,15 @@ package org.elasticsearch.action.search; import com.carrotsearch.hppc.IntArrayList; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.action.SearchTransportService; @@ -50,7 +52,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction fetchResults; final AtomicArray docIdsToLoad; - SearchDfsQueryThenFetchAsyncAction(ESLogger logger, SearchTransportService searchTransportService, + 
SearchDfsQueryThenFetchAsyncAction(Logger logger, SearchTransportService searchTransportService, ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, SearchPhaseController searchPhaseController, ThreadPool threadPool, SearchRequest request, ActionListener listener) { @@ -113,7 +115,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction) () -> new ParameterizedMessage("[{}] Failed to execute query phase", querySearchRequest.id()), e); } this.addShardFailure(shardIndex, dfsResult.shardTarget(), e); successfulOps.decrementAndGet(); @@ -182,7 +184,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction) () -> new ParameterizedMessage("[{}] Failed to execute fetch phase", fetchSearchRequest.id()), e); } this.addShardFailure(shardIndex, shardTarget, e); successfulOps.decrementAndGet(); diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java index fad4d60275d..2e13a0d26e8 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.search; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.search.action.SearchTransportService; import org.elasticsearch.search.controller.SearchPhaseController; import org.elasticsearch.search.fetch.QueryFetchSearchResult; @@ -36,7 +36,7 @@ import java.io.IOException; class SearchQueryAndFetchAsyncAction extends AbstractSearchAsyncAction { - SearchQueryAndFetchAsyncAction(ESLogger logger, SearchTransportService searchTransportService, + SearchQueryAndFetchAsyncAction(Logger logger, SearchTransportService searchTransportService, ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, SearchPhaseController searchPhaseController, ThreadPool threadPool, SearchRequest request, ActionListener listener) { diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java index 5f90d291dd2..3987b48c561 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java @@ -20,13 +20,15 @@ package org.elasticsearch.action.search; import com.carrotsearch.hppc.IntArrayList; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.util.concurrent.AtomicArray; import 
org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.action.SearchTransportService; @@ -46,7 +48,7 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction fetchResults; final AtomicArray docIdsToLoad; - SearchQueryThenFetchAsyncAction(ESLogger logger, SearchTransportService searchService, + SearchQueryThenFetchAsyncAction(Logger logger, SearchTransportService searchService, ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, SearchPhaseController searchPhaseController, ThreadPool threadPool, SearchRequest request, ActionListener listener) { @@ -115,7 +117,7 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction) () -> new ParameterizedMessage("[{}] Failed to execute fetch phase", fetchSearchRequest.id()), e); } this.addShardFailure(shardIndex, shardTarget, e); successfulOps.decrementAndGet(); diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java index 72154f224d2..24e497954a7 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java @@ -19,12 +19,14 @@ package org.elasticsearch.action.search; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.action.SearchTransportService; import org.elasticsearch.search.controller.SearchPhaseController; @@ -40,7 +42,7 @@ import static org.elasticsearch.action.search.TransportSearchHelper.internalScro class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction { - private final ESLogger logger; + private final Logger logger; private final SearchPhaseController searchPhaseController; private final SearchTransportService searchTransportService; private final SearchScrollRequest request; @@ -52,7 +54,7 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction { private final AtomicInteger successfulOps; private final AtomicInteger counter; - SearchScrollQueryAndFetchAsyncAction(ESLogger logger, ClusterService clusterService, + SearchScrollQueryAndFetchAsyncAction(Logger logger, ClusterService clusterService, SearchTransportService searchTransportService, SearchPhaseController searchPhaseController, SearchScrollRequest request, ParsedScrollId scrollId, ActionListener listener) { this.logger = logger; @@ -146,7 +148,7 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction { private void onPhaseFailure(Exception e, long searchId, int shardIndex) { if (logger.isDebugEnabled()) { - logger.debug("[{}] Failed to execute query phase", e, searchId); + logger.debug((Supplier) () -> new ParameterizedMessage("[{}] Failed to execute query phase", searchId), e); } addShardFailure(shardIndex, new ShardSearchFailure(e)); successfulOps.decrementAndGet(); diff --git 
a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java index d9f649a7a55..21f1c4ce68a 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java @@ -20,12 +20,14 @@ package org.elasticsearch.action.search; import com.carrotsearch.hppc.IntArrayList; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.action.SearchTransportService; import org.elasticsearch.search.controller.SearchPhaseController; @@ -43,7 +45,7 @@ import static org.elasticsearch.action.search.TransportSearchHelper.internalScro class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction { - private final ESLogger logger; + private final Logger logger; private final SearchTransportService searchTransportService; private final SearchPhaseController searchPhaseController; private final SearchScrollRequest request; @@ -56,7 +58,7 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction { private volatile ScoreDoc[] sortedShardDocs; private final AtomicInteger successfulOps; - SearchScrollQueryThenFetchAsyncAction(ESLogger logger, ClusterService clusterService, + SearchScrollQueryThenFetchAsyncAction(Logger logger, ClusterService clusterService, SearchTransportService searchTransportService, SearchPhaseController searchPhaseController, SearchScrollRequest request, ParsedScrollId scrollId, ActionListener listener) { this.logger = logger; @@ -146,7 +148,7 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction { void onQueryPhaseFailure(final int shardIndex, final AtomicInteger counter, final long searchId, Exception failure) { if (logger.isDebugEnabled()) { - logger.debug("[{}] Failed to execute query phase", failure, searchId); + logger.debug((Supplier) () -> new ParameterizedMessage("[{}] Failed to execute query phase", searchId), failure); } addShardFailure(shardIndex, new ShardSearchFailure(failure)); successfulOps.decrementAndGet(); diff --git a/core/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java b/core/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java index 092b69fc936..220e7f5b250 100644 --- a/core/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java @@ -19,6 +19,8 @@ package org.elasticsearch.action.search; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; @@ -144,7 +146,7 @@ public class TransportClearScrollAction extends HandledTransportAction) () -> new ParameterizedMessage("Clear SC failed on node[{}]", node), 
e); if (expectedOps.countDown()) { listener.onResponse(new ClearScrollResponse(false, numberOfFreedSearchContexts.get())); } else { diff --git a/core/src/main/java/org/elasticsearch/action/support/AbstractListenableActionFuture.java b/core/src/main/java/org/elasticsearch/action/support/AbstractListenableActionFuture.java index 78a61ebfe16..ee260ddd1e1 100644 --- a/core/src/main/java/org/elasticsearch/action/support/AbstractListenableActionFuture.java +++ b/core/src/main/java/org/elasticsearch/action/support/AbstractListenableActionFuture.java @@ -19,9 +19,9 @@ package org.elasticsearch.action.support; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ListenableActionFuture; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.threadpool.ThreadPool; @@ -33,7 +33,7 @@ import java.util.List; */ public abstract class AbstractListenableActionFuture extends AdapterActionFuture implements ListenableActionFuture { - private static final ESLogger logger = Loggers.getLogger(AbstractListenableActionFuture.class); + private static final Logger logger = Loggers.getLogger(AbstractListenableActionFuture.class); final ThreadPool threadPool; volatile Object listeners; diff --git a/core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java b/core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java index d4ddae78225..5c9152b4751 100644 --- a/core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java +++ b/core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java @@ -24,8 +24,8 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -41,16 +41,17 @@ import java.util.List; public final class AutoCreateIndex { public static final Setting AUTO_CREATE_INDEX_SETTING = - new Setting<>("action.auto_create_index", "true", AutoCreate::new, Property.NodeScope); + new Setting<>("action.auto_create_index", "true", AutoCreate::new, Property.NodeScope, Setting.Property.Dynamic); private final boolean dynamicMappingDisabled; private final IndexNameExpressionResolver resolver; - private final AutoCreate autoCreate; + private volatile AutoCreate autoCreate; - public AutoCreateIndex(Settings settings, IndexNameExpressionResolver resolver) { + public AutoCreateIndex(Settings settings, ClusterSettings clusterSettings, IndexNameExpressionResolver resolver) { this.resolver = resolver; dynamicMappingDisabled = !MapperService.INDEX_MAPPER_DYNAMIC_SETTING.get(settings); this.autoCreate = AUTO_CREATE_INDEX_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(AUTO_CREATE_INDEX_SETTING, this::setAutoCreate); } /** @@ -64,6 +65,8 @@ public final class AutoCreateIndex { * Should the index be auto created? 
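The AutoCreateIndex hunks here show this PR's recipe for making a node setting dynamic: the setting gains Property.Dynamic, the parsed value lives in a volatile field, a consumer registered through ClusterSettings swaps that field, and readers take a single snapshot so every check within one call sees the same instance. A compilable sketch of the pattern with invented names; only the one-volatile-read comment mirrors the PR:

public class DynamicConfigHolder {
    static final class Config {
        final boolean enabled;
        Config(boolean enabled) { this.enabled = enabled; }
    }

    private volatile Config config = new Config(true);

    // In AutoCreateIndex this is wired up via clusterSettings.addSettingsUpdateConsumer(...).
    void setConfig(Config config) {
        this.config = config;
    }

    boolean shouldAct(String index) {
        // One volatile read, so that all checks are done against the same instance.
        final Config config = this.config;
        if (config.enabled == false) {
            return false;
        }
        return index != null && index.isEmpty() == false;
    }
}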
*/ public boolean shouldAutoCreate(String index, ClusterState state) { + // One volatile read, so that all checks are done against the same instance: + final AutoCreate autoCreate = this.autoCreate; if (autoCreate.autoCreateIndex == false) { return false; } @@ -87,7 +90,15 @@ public final class AutoCreateIndex { return false; } - private static class AutoCreate { + AutoCreate getAutoCreate() { + return autoCreate; + } + + void setAutoCreate(AutoCreate autoCreate) { + this.autoCreate = autoCreate; + } + + static class AutoCreate { private final boolean autoCreateIndex; private final List> expressions; @@ -128,5 +139,13 @@ public final class AutoCreateIndex { this.expressions = expressions; this.autoCreateIndex = autoCreateIndex; } + + boolean isAutoCreateIndex() { + return autoCreateIndex; + } + + List> getExpressions() { + return expressions; + } } } diff --git a/core/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java b/core/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java index b4033d6a9fc..0a53b63b662 100644 --- a/core/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.action.support; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; @@ -75,8 +76,13 @@ public abstract class HandledTransportAction) + () -> new ParameterizedMessage( + "Failed to send error response for action [{}] and request [{}]", + actionName, + request), + e1); } } }); diff --git a/core/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java b/core/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java index 4ff7cdaa7bd..759693e550e 100644 --- a/core/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java +++ b/core/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java @@ -19,10 +19,12 @@ package org.elasticsearch.action.support; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.Client; import org.elasticsearch.client.transport.TransportClient; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.threadpool.ThreadPool; @@ -39,12 +41,12 @@ public final class ThreadedActionListener implements ActionListener implements ActionListener listener; private final boolean forceExecution; - public ThreadedActionListener(ESLogger logger, ThreadPool threadPool, String executor, ActionListener listener, + public ThreadedActionListener(Logger logger, ThreadPool threadPool, String executor, ActionListener listener, boolean forceExecution) { this.logger = logger; this.threadPool = threadPool; @@ -118,7 +120,8 @@ public final class ThreadedActionListener implements ActionListener) () -> new ParameterizedMessage("failed to execute failure callback on [{}]", listener), e); } }); } diff --git a/core/src/main/java/org/elasticsearch/action/support/TransportAction.java b/core/src/main/java/org/elasticsearch/action/support/TransportAction.java index 582878a427d..7d1a091d6b3 100644 --- 
a/core/src/main/java/org/elasticsearch/action/support/TransportAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/TransportAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.support; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; @@ -27,7 +28,6 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskListener; @@ -165,9 +165,9 @@ public abstract class TransportAction, Re private final TransportAction action; private final AtomicInteger index = new AtomicInteger(); - private final ESLogger logger; + private final Logger logger; - private RequestFilterChain(TransportAction action, ESLogger logger) { + private RequestFilterChain(TransportAction action, Logger logger) { this.action = action; this.logger = logger; } @@ -201,9 +201,9 @@ public abstract class TransportAction, Re private final ActionFilter[] filters; private final AtomicInteger index; - private final ESLogger logger; + private final Logger logger; - private ResponseFilterChain(ActionFilter[] filters, ESLogger logger) { + private ResponseFilterChain(ActionFilter[] filters, Logger logger) { this.filters = filters; this.index = new AtomicInteger(filters.length); this.logger = logger; diff --git a/core/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java b/core/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java index 826d76de83a..87ef385a243 100644 --- a/core/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.support.broadcast; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.support.ActionFilters; @@ -37,10 +38,10 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequestHandler; +import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; import java.util.concurrent.atomic.AtomicInteger; @@ -224,7 +225,13 @@ public abstract class TransportBroadcastAction) + () -> new ParameterizedMessage( + "{}: failed to execute [{}]", + shard != null ? shard.shortSummary() : shardIt.shardId(), + request), + e); } } } @@ -233,7 +240,13 @@ public abstract class TransportBroadcastAction) + () -> new ParameterizedMessage( + "{}: failed to execute [{}]", + shard != null ? 
shard.shortSummary() : shardIt.shardId(), + request), + e); } } } diff --git a/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java b/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java index cb5ba8788c0..98c962b3eec 100644 --- a/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.support.broadcast.node; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.IndicesRequest; @@ -46,13 +47,13 @@ import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.NodeShouldNotConnectException; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; import java.io.IOException; @@ -363,7 +364,9 @@ public abstract class TransportBroadcastByNodeAction) + () -> new ParameterizedMessage("failed to execute [{}] on node [{}]", actionName, nodeId), t); } // this is defensive to protect against the possibility of double invocation @@ -441,11 +444,23 @@ public abstract class TransportBroadcastByNodeAction) + () -> new ParameterizedMessage( + "[{}] failed to execute operation for shard [{}]", + actionName, + shardRouting.shortSummary()), + e); } } else { if (logger.isDebugEnabled()) { - logger.debug("[{}] failed to execute operation for shard [{}]", e, actionName, shardRouting.shortSummary()); + logger.debug( + (org.apache.logging.log4j.util.Supplier) + () -> new ParameterizedMessage( + "[{}] failed to execute operation for shard [{}]", + actionName, + shardRouting.shortSummary()), + e); } } } diff --git a/core/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java b/core/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java index 7d6d4375731..a664c325a4b 100644 --- a/core/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.support.master; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.ActionResponse; @@ -155,7 +156,7 @@ public abstract class TransportMasterNodeAction) () -> new ParameterizedMessage("master could not publish cluster state or stepped down before publishing action [{}], scheduling a retry", actionName), t); retry(t, MasterNodeChangePredicate.INSTANCE); } else { listener.onFailure(t); @@ -209,7 +210,7 @@ public abstract class TransportMasterNodeAction) () -> new ParameterizedMessage("timed out while retrying 
[{}] after failure (timeout [{}])", actionName, timeout), failure); listener.onFailure(new MasterNotDiscoveredException(failure)); } }, changePredicate diff --git a/core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java b/core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java index 276484286b4..3582f5f5aaf 100644 --- a/core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.support.nodes; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.NoSuchNodeException; @@ -31,13 +32,13 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.NodeShouldNotConnectException; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; import java.util.ArrayList; @@ -238,7 +239,9 @@ public abstract class TransportNodesAction) + () -> new ParameterizedMessage("failed to execute on node [{}]", nodeId), t); } if (accumulateExceptions()) { responses.set(idx, new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t)); diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index 27f0504413c..d541ef6a35c 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.action.support.replication; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; @@ -31,7 +33,6 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.shard.ShardId; @@ -56,7 +57,7 @@ public class ReplicationOperation< ReplicaRequest extends ReplicationRequest, PrimaryResultT extends ReplicationOperation.PrimaryResult > { - private final ESLogger logger; + private final Logger logger; private final Request request; private final Supplier clusterStateSupplier; private final String opType; @@ -86,7 +87,7 @@ public class ReplicationOperation< public ReplicationOperation(Request request, Primary primary, ActionListener listener, boolean 
executeOnReplicas, Replicas replicas, - Supplier clusterStateSupplier, ESLogger logger, String opType) { + Supplier clusterStateSupplier, Logger logger, String opType) { this.executeOnReplicas = executeOnReplicas; this.replicasProxy = replicas; this.primary = primary; @@ -189,8 +190,14 @@ public class ReplicationOperation< @Override public void onFailure(Exception replicaException) { - logger.trace("[{}] failure while performing [{}] on replica {}, request [{}]", replicaException, shard.shardId(), opType, - shard, replicaRequest); + logger.trace( + (org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( + "[{}] failure while performing [{}] on replica {}, request [{}]", + shard.shardId(), + opType, + shard, + replicaRequest), + replicaException); if (ignoreReplicaException(replicaException)) { decPendingAndFinishIfNeeded(); } else { @@ -198,7 +205,9 @@ public class ReplicationOperation< shardReplicaFailures.add(new ReplicationResponse.ShardInfo.Failure( shard.shardId(), shard.currentNodeId(), replicaException, restStatus, false)); String message = String.format(Locale.ROOT, "failed to perform %s on replica %s", opType, shard); - logger.warn("[{}] {}", replicaException, shard.shardId(), message); + logger.warn( + (org.apache.logging.log4j.util.Supplier) + () -> new ParameterizedMessage("[{}] {}", shard.shardId(), message), replicaException); replicasProxy.failShard(shard, replicaRequest.primaryTerm(), message, replicaException, ReplicationOperation.this::decPendingAndFinishIfNeeded, ReplicationOperation.this::onPrimaryDemoted, diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java index a1ddcdcedd5..596d2581a79 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java @@ -248,4 +248,12 @@ public abstract class ReplicationRequest new ConcreteShardRequest<>(request), executor, + new PrimaryOperationTransportHandler()); // we must never reject on because of thread pool capacity on replicas - transportService.registerRequestHandler(transportReplicaAction, replicaRequest, executor, true, true, + transportService.registerRequestHandler(transportReplicaAction, + () -> new ConcreteShardRequest<>(replicaRequest), + executor, true, true, new ReplicaOperationTransportHandler()); this.transportOptions = transportOptions(); @@ -162,7 +172,7 @@ public abstract class TransportReplicationAction< /** * Synchronous replica operation on nodes with replica copies. This is done under the lock form - * {@link #acquireReplicaOperationLock(ShardId, long, ActionListener)}. + * {@link #acquireReplicaOperationLock(ShardId, long, String, ActionListener)}. 
*/ protected abstract ReplicaResult shardOperationOnReplica(ReplicaRequest shardRequest); @@ -215,7 +225,9 @@ public abstract class TransportReplicationAction< channel.sendResponse(e); } catch (Exception inner) { inner.addSuppressed(e); - logger.warn("Failed to send response for {}", inner, actionName); + logger.warn( + (org.apache.logging.log4j.util.Supplier) + () -> new ParameterizedMessage("Failed to send response for {}", actionName), inner); } } }); @@ -227,33 +239,36 @@ public abstract class TransportReplicationAction< } } - class PrimaryOperationTransportHandler implements TransportRequestHandler { + class PrimaryOperationTransportHandler implements TransportRequestHandler> { @Override - public void messageReceived(final Request request, final TransportChannel channel) throws Exception { + public void messageReceived(final ConcreteShardRequest request, final TransportChannel channel) throws Exception { throw new UnsupportedOperationException("the task parameter is required for this operation"); } @Override - public void messageReceived(Request request, TransportChannel channel, Task task) { - new AsyncPrimaryAction(request, channel, (ReplicationTask) task).run(); + public void messageReceived(ConcreteShardRequest request, TransportChannel channel, Task task) { + new AsyncPrimaryAction(request.request, request.targetAllocationID, channel, (ReplicationTask) task).run(); } } class AsyncPrimaryAction extends AbstractRunnable implements ActionListener { private final Request request; + /** targetAllocationID of the shard this request is meant for */ + private final String targetAllocationID; private final TransportChannel channel; private final ReplicationTask replicationTask; - AsyncPrimaryAction(Request request, TransportChannel channel, ReplicationTask replicationTask) { + AsyncPrimaryAction(Request request, String targetAllocationID, TransportChannel channel, ReplicationTask replicationTask) { this.request = request; + this.targetAllocationID = targetAllocationID; this.channel = channel; this.replicationTask = replicationTask; } @Override protected void doRun() throws Exception { - acquirePrimaryShardReference(request.shardId(), this); + acquirePrimaryShardReference(request.shardId(), targetAllocationID, this); } @Override @@ -268,7 +283,9 @@ public abstract class TransportReplicationAction< final ShardRouting primary = primaryShardReference.routingEntry(); assert primary.relocating() : "indexShard is marked as relocated but routing isn't" + primary; DiscoveryNode relocatingNode = clusterService.state().nodes().get(primary.relocatingNodeId()); - transportService.sendRequest(relocatingNode, transportPrimaryAction, request, transportOptions, + transportService.sendRequest(relocatingNode, transportPrimaryAction, + new ConcreteShardRequest<>(request, primary.allocationId().getRelocationId()), + transportOptions, new TransportChannelResponseHandler(logger, channel, "rerouting indexing to target primary " + primary, TransportReplicationAction.this::newResponseInstance) { @@ -388,15 +405,17 @@ public abstract class TransportReplicationAction< } } - class ReplicaOperationTransportHandler implements TransportRequestHandler { + class ReplicaOperationTransportHandler implements TransportRequestHandler> { @Override - public void messageReceived(final ReplicaRequest request, final TransportChannel channel) throws Exception { + public void messageReceived(final ConcreteShardRequest request, final TransportChannel channel) + throws Exception { throw new UnsupportedOperationException("the task 
parameter is required for this operation"); } @Override - public void messageReceived(ReplicaRequest request, TransportChannel channel, Task task) throws Exception { - new AsyncReplicaAction(request, channel, (ReplicationTask) task).run(); + public void messageReceived(ConcreteShardRequest requestWithAID, TransportChannel channel, Task task) + throws Exception { + new AsyncReplicaAction(requestWithAID.request, requestWithAID.targetAllocationID, channel, (ReplicationTask) task).run(); } } @@ -414,6 +433,8 @@ public abstract class TransportReplicationAction< private final class AsyncReplicaAction extends AbstractRunnable implements ActionListener { private final ReplicaRequest request; + // allocation id of the replica this request is meant for + private final String targetAllocationID; private final TransportChannel channel; /** * The task on the node with the replica shard. @@ -423,10 +444,11 @@ public abstract class TransportReplicationAction< // something we want to avoid at all costs private final ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext()); - AsyncReplicaAction(ReplicaRequest request, TransportChannel channel, ReplicationTask task) { + AsyncReplicaAction(ReplicaRequest request, String targetAllocationID, TransportChannel channel, ReplicationTask task) { this.request = request; this.channel = channel; this.task = task; + this.targetAllocationID = targetAllocationID; } @Override @@ -444,7 +466,13 @@ public abstract class TransportReplicationAction< @Override public void onFailure(Exception e) { if (e instanceof RetryOnReplicaException) { - logger.trace("Retrying operation on replica, action [{}], request [{}]", e, transportReplicaAction, request); + logger.trace( + (org.apache.logging.log4j.util.Supplier) + () -> new ParameterizedMessage( + "Retrying operation on replica, action [{}], request [{}]", + transportReplicaAction, + request), + e); final ThreadContext.StoredContext context = threadPool.getThreadContext().newStoredContext(); observer.waitForNextChange(new ClusterStateObserver.Listener() { @Override @@ -455,7 +483,9 @@ public abstract class TransportReplicationAction< String extraMessage = "action [" + transportReplicaAction + "], request[" + request + "]"; TransportChannelResponseHandler handler = new TransportChannelResponseHandler<>(logger, channel, extraMessage, () -> TransportResponse.Empty.INSTANCE); - transportService.sendRequest(clusterService.localNode(), transportReplicaAction, request, handler); + transportService.sendRequest(clusterService.localNode(), transportReplicaAction, + new ConcreteShardRequest<>(request, targetAllocationID), + handler); } @Override @@ -479,7 +509,12 @@ public abstract class TransportReplicationAction< channel.sendResponse(e); } catch (IOException responseException) { responseException.addSuppressed(e); - logger.warn("failed to send error message back to client for action [{}]", responseException, transportReplicaAction); + logger.warn( + (org.apache.logging.log4j.util.Supplier) + () -> new ParameterizedMessage( + "failed to send error message back to client for action [{}]", + transportReplicaAction), + responseException); } } @@ -487,7 +522,7 @@ public abstract class TransportReplicationAction< protected void doRun() throws Exception { setPhase(task, "replica"); assert request.shardId() != null : "request shardId must be set"; - acquireReplicaOperationLock(request.shardId(), request.primaryTerm(), this); + acquireReplicaOperationLock(request.shardId(), 
request.primaryTerm(), targetAllocationID, this); } /** @@ -584,7 +619,7 @@ public abstract class TransportReplicationAction< logger.trace("send action [{}] on primary [{}] for request [{}] with cluster state version [{}] to [{}] ", transportPrimaryAction, request.shardId(), request, state.version(), primary.currentNodeId()); } - performAction(node, transportPrimaryAction, true); + performAction(node, transportPrimaryAction, true, new ConcreteShardRequest<>(request, primary.allocationId().getId())); } private void performRemoteAction(ClusterState state, ShardRouting primary, DiscoveryNode node) { @@ -606,7 +641,7 @@ public abstract class TransportReplicationAction< request.shardId(), request, state.version(), primary.currentNodeId()); } setPhase(task, "rerouted"); - performAction(node, actionName, false); + performAction(node, actionName, false, request); } private boolean retryIfUnavailable(ClusterState state, ShardRouting primary) { @@ -657,8 +692,9 @@ public abstract class TransportReplicationAction< } } - private void performAction(final DiscoveryNode node, final String action, final boolean isPrimaryAction) { - transportService.sendRequest(node, action, request, transportOptions, new TransportResponseHandler() { + private void performAction(final DiscoveryNode node, final String action, final boolean isPrimaryAction, + final TransportRequest requestToPerform) { + transportService.sendRequest(node, action, requestToPerform, transportOptions, new TransportResponseHandler() { @Override public Response newInstance() { @@ -682,8 +718,12 @@ public abstract class TransportReplicationAction< final Throwable cause = exp.unwrapCause(); if (cause instanceof ConnectTransportException || cause instanceof NodeClosedException || (isPrimaryAction && retryPrimaryException(cause))) { - logger.trace("received an error from node [{}] for request [{}], scheduling a retry", exp, node.getId(), - request); + logger.trace( + (org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( + "received an error from node [{}] for request [{}], scheduling a retry", + node.getId(), + requestToPerform), + exp); retry(exp); } else { finishAsFailed(exp); @@ -704,6 +744,7 @@ public abstract class TransportReplicationAction< return; } setPhase(task, "waiting_for_retry"); + request.onRetry(); final ThreadContext.StoredContext context = threadPool.getThreadContext().newStoredContext(); observer.waitForNextChange(new ClusterStateObserver.Listener() { @Override @@ -729,7 +770,9 @@ public abstract class TransportReplicationAction< void finishAsFailed(Exception failure) { if (finished.compareAndSet(false, true)) { setPhase(task, "failed"); - logger.trace("operation failed. action [{}], request [{}]", failure, actionName, request); + logger.trace( + (org.apache.logging.log4j.util.Supplier) + () -> new ParameterizedMessage("operation failed. 
action [{}], request [{}]", actionName, request), failure); listener.onFailure(failure); } else { assert false : "finishAsFailed called but operation is already finished"; @@ -737,7 +780,13 @@ public abstract class TransportReplicationAction< } void finishWithUnexpectedFailure(Exception failure) { - logger.warn("unexpected error during the primary phase for action [{}], request [{}]", failure, actionName, request); + logger.warn( + (org.apache.logging.log4j.util.Supplier) + () -> new ParameterizedMessage( + "unexpected error during the primary phase for action [{}], request [{}]", + actionName, + request), + failure); if (finished.compareAndSet(false, true)) { setPhase(task, "failed"); listener.onFailure(failure); @@ -767,7 +816,8 @@ public abstract class TransportReplicationAction< * tries to acquire reference to {@link IndexShard} to perform a primary operation. Released after performing primary operation locally * and replication of the operation to all replica shards is completed / failed (see {@link ReplicationOperation}). */ - protected void acquirePrimaryShardReference(ShardId shardId, ActionListener onReferenceAcquired) { + protected void acquirePrimaryShardReference(ShardId shardId, String allocationId, + ActionListener onReferenceAcquired) { IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); IndexShard indexShard = indexService.getShard(shardId.id()); // we may end up here if the cluster state used to route the primary is so stale that the underlying @@ -777,6 +827,10 @@ public abstract class TransportReplicationAction< throw new ReplicationOperation.RetryOnPrimaryException(indexShard.shardId(), "actual shard is not a primary " + indexShard.routingEntry()); } + final String actualAllocationId = indexShard.routingEntry().allocationId().getId(); + if (actualAllocationId.equals(allocationId) == false) { + throw new ShardNotFoundException(shardId, "expected aID [{}] but found [{}]", allocationId, actualAllocationId); + } ActionListener onAcquired = new ActionListener() { @Override @@ -796,9 +850,14 @@ public abstract class TransportReplicationAction< /** * tries to acquire an operation on replicas. The lock is closed as soon as replication is completed on the node. 
*/ - protected void acquireReplicaOperationLock(ShardId shardId, long primaryTerm, ActionListener<Releasable> onLockAcquired) { + protected void acquireReplicaOperationLock(ShardId shardId, long primaryTerm, final String allocationId, + ActionListener<Releasable> onLockAcquired) { IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); IndexShard indexShard = indexService.getShard(shardId.id()); + final String actualAllocationId = indexShard.routingEntry().allocationId().getId(); + if (actualAllocationId.equals(allocationId) == false) { + throw new ShardNotFoundException(shardId, "expected aID [{}] but found [{}]", allocationId, actualAllocationId); + } indexShard.acquireReplicaOperationLock(primaryTerm, onLockAcquired, executor); } @@ -861,7 +920,8 @@ public abstract class TransportReplicationAction< listener.onFailure(new NoNodeAvailableException("unknown node [" + nodeId + "]")); return; } - transportService.sendRequest(node, transportReplicaAction, request, transportOptions, + transportService.sendRequest(node, transportReplicaAction, + new ConcreteShardRequest<>(request, replica.allocationId().getId()), transportOptions, new ActionListenerResponseHandler<>(listener, () -> TransportResponse.Empty.INSTANCE)); } @@ -903,6 +963,72 @@ public abstract class TransportReplicationAction< } } + /** a wrapper class to encapsulate a request when being sent to a specific allocation id **/ + public static final class ConcreteShardRequest<R extends TransportRequest> extends TransportRequest { + + /** {@link AllocationId#getId()} of the shard this request is sent to **/ + private String targetAllocationID; + + private R request; + + ConcreteShardRequest(Supplier<R> requestSupplier) { + request = requestSupplier.get(); + // null now, but will be populated by reading from the streams + targetAllocationID = null; + } + + ConcreteShardRequest(R request, String targetAllocationID) { + Objects.requireNonNull(request); + Objects.requireNonNull(targetAllocationID); + this.request = request; + this.targetAllocationID = targetAllocationID; + } + + @Override + public void setParentTask(String parentTaskNode, long parentTaskId) { + request.setParentTask(parentTaskNode, parentTaskId); + } + + @Override + public void setParentTask(TaskId taskId) { + request.setParentTask(taskId); + } + + @Override + public TaskId getParentTask() { + return request.getParentTask(); + } + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId) { + return request.createTask(id, type, action, parentTaskId); + } + + @Override + public String getDescription() { + return "[" + request.getDescription() + "] for aID [" + targetAllocationID + "]"; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + targetAllocationID = in.readString(); + request.readFrom(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(targetAllocationID); + request.writeTo(out); + } + + public R getRequest() { + return request; + } + + public String getTargetAllocationID() { + return targetAllocationID; + } + } + /** * Sets the current phase on the task if it isn't null. Pulled into its own * method because it's more convenient that way.
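[Editor's note] The ConcreteShardRequest wrapper above is the core of the replication change in this patch: every primary or replica request is now addressed to one specific shard copy, identified by its allocation ID, and both acquirePrimaryShardReference and acquireReplicaOperationLock throw ShardNotFoundException when the allocation ID of the local copy does not match. What follows is a minimal, self-contained sketch of that pattern in plain Java, not Elasticsearch code; the names TargetedShardRequest and unwrapFor are hypothetical.

    import java.util.Objects;

    /** Sketch: tag a request with the allocation id of the shard copy it was routed to. */
    final class TargetedShardRequest<R> {

        /** identity of the exact shard copy the sender routed to */
        private final String targetAllocationId;

        /** the wrapped shard-level request */
        private final R request;

        TargetedShardRequest(R request, String targetAllocationId) {
            this.request = Objects.requireNonNull(request);
            this.targetAllocationId = Objects.requireNonNull(targetAllocationId);
        }

        /**
         * Receiver-side check: if the shard copy was re-allocated between routing and
         * arrival, the allocation ids differ, and we fail fast instead of executing
         * the operation against the wrong copy.
         */
        R unwrapFor(String localAllocationId) {
            if (targetAllocationId.equals(localAllocationId) == false) {
                throw new IllegalStateException(
                    "expected aID [" + targetAllocationId + "] but found [" + localAllocationId + "]");
            }
            return request;
        }
    }

Note also that writeTo above serializes the allocation ID before the request body, so the receiving node can identify the target shard copy before deserializing the rest of the request.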
diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java index c472c7454ab..39b49a4a409 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.support.replication; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.WriteRequest; @@ -27,7 +28,6 @@ import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; @@ -241,13 +241,13 @@ public abstract class TransportWriteAction< private final RespondingWriteResult respond; private final IndexShard indexShard; private final WriteRequest request; - private final ESLogger logger; + private final Logger logger; AsyncAfterWriteAction(final IndexShard indexShard, final WriteRequest request, @Nullable final Translog.Location location, final RespondingWriteResult respond, - final ESLogger logger) { + final Logger logger) { this.indexShard = indexShard; this.request = request; boolean waitUntilRefresh = false; diff --git a/core/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java b/core/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java index a3964b6bbfa..8981caa60f7 100644 --- a/core/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.support.single.shard; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.NoShardAvailableActionException; @@ -39,10 +40,10 @@ import org.elasticsearch.common.logging.LoggerMessageFormat; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequestHandler; +import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; import java.util.function.Supplier; @@ -187,7 +188,9 @@ public abstract class TransportSingleShardAction) + () -> new ParameterizedMessage("{}: failed to execute [{}]", shardRouting, internalRequest.request()), e); } perform(e); } @@ -205,7 +208,9 @@ public abstract class TransportSingleShardAction) + () -> new ParameterizedMessage("{}: failed to execute [{}]", null, internalRequest.request()), failure); } } listener.onFailure(failure); diff --git a/core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java 
b/core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java index a30d9c1f254..6752ccd7293 100644 --- a/core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.support.tasks; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; @@ -38,7 +39,6 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.NodeShouldNotConnectException; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportException; @@ -46,6 +46,7 @@ import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; import java.io.IOException; @@ -275,7 +276,9 @@ public abstract class TransportTasksAction< private void onFailure(int idx, String nodeId, Throwable t) { if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) { - logger.debug("failed to execute on node [{}]", t, nodeId); + logger.debug( + (org.apache.logging.log4j.util.Supplier) + () -> new ParameterizedMessage("failed to execute on node [{}]", nodeId), t); } if (accumulateExceptions()) { responses.set(idx, new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t)); diff --git a/core/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java b/core/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java index da12831f1c7..8c1d06113d6 100644 --- a/core/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java +++ b/core/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java @@ -19,6 +19,8 @@ package org.elasticsearch.action.termvectors; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.TransportActions; @@ -87,7 +89,7 @@ public class TransportShardMultiTermsVectorAction extends TransportSingleShardAc if (TransportActions.isShardNotAvailableException(t)) { throw (ElasticsearchException) t; } else { - logger.debug("{} failed to execute multi term vectors for [{}]/[{}]", t, shardId, termVectorsRequest.type(), termVectorsRequest.id()); + logger.debug((Supplier) () -> new ParameterizedMessage("{} failed to execute multi term vectors for [{}]/[{}]", shardId, termVectorsRequest.type(), termVectorsRequest.id()), t); response.add(request.locations.get(i), new MultiTermVectorsResponse.Failure(request.index(), termVectorsRequest.type(), termVectorsRequest.id(), t)); } diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index 
e2c2e3dce76..2a38a020fee 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -19,6 +19,7 @@ package org.elasticsearch.bootstrap; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.Constants; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.StringHelper; @@ -28,7 +29,6 @@ import org.elasticsearch.cli.Terminal; import org.elasticsearch.common.PidFile; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.inject.CreationException; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.LogConfigurator; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; @@ -81,7 +81,7 @@ final class Bootstrap { /** initialize native resources */ public static void initializeNatives(Path tmpFile, boolean mlockAll, boolean seccomp, boolean ctrlHandler) { - final ESLogger logger = Loggers.getLogger(Bootstrap.class); + final Logger logger = Loggers.getLogger(Bootstrap.class); // check if the user is running as root, and bail if (Natives.definitelyRunningAsRoot()) { @@ -227,7 +227,7 @@ final class Bootstrap { INSTANCE = new Bootstrap(); Environment environment = initialEnvironment(foreground, pidFile, esSettings); - LogConfigurator.configure(environment.settings(), true); + LogConfigurator.configure(environment, true); checkForCustomConfFile(); if (environment.pidFile() != null) { @@ -264,7 +264,7 @@ final class Bootstrap { if (foreground) { Loggers.disableConsoleLogging(); } - ESLogger logger = Loggers.getLogger(Bootstrap.class); + Logger logger = Loggers.getLogger(Bootstrap.class); if (INSTANCE.node != null) { logger = Loggers.getLogger(Bootstrap.class, Node.NODE_NAME_SETTING.get(INSTANCE.node.settings())); } @@ -310,7 +310,7 @@ final class Bootstrap { private static void checkUnsetAndMaybeExit(String confFileSetting, String settingName) { if (confFileSetting != null && confFileSetting.isEmpty() == false) { - ESLogger logger = Loggers.getLogger(Bootstrap.class); + Logger logger = Loggers.getLogger(Bootstrap.class); logger.info("{} is no longer supported. 
elasticsearch.yml must be placed in the config directory and cannot be renamed.", settingName); exit(1); } diff --git a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java index 94143f9986c..2091b4f3d35 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java @@ -19,15 +19,16 @@ package org.elasticsearch.bootstrap; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.Constants; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.PathUtils; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.monitor.process.ProcessProbe; import org.elasticsearch.node.Node; @@ -100,7 +101,7 @@ final class BootstrapCheck { final boolean enforceLimits, final boolean ignoreSystemChecks, final List checks, - final ESLogger logger) { + final Logger logger) { final List errors = new ArrayList<>(); final List ignoredErrors = new ArrayList<>(); @@ -136,7 +137,7 @@ final class BootstrapCheck { } - static void log(final ESLogger logger, final String error) { + static void log(final Logger logger, final String error) { logger.warn(error); } @@ -417,7 +418,7 @@ final class BootstrapCheck { } // visible for testing - long getMaxMapCount(ESLogger logger) { + long getMaxMapCount(Logger logger) { final Path path = getProcSysVmMaxMapCountPath(); try (final BufferedReader bufferedReader = getBufferedReader(path)) { final String rawProcSysVmMaxMapCount = readProcSysVmMaxMapCount(bufferedReader); @@ -425,11 +426,15 @@ final class BootstrapCheck { try { return parseProcSysVmMaxMapCount(rawProcSysVmMaxMapCount); } catch (final NumberFormatException e) { - logger.warn("unable to parse vm.max_map_count [{}]", e, rawProcSysVmMaxMapCount); + logger.warn( + (Supplier) () -> new ParameterizedMessage( + "unable to parse vm.max_map_count [{}]", + rawProcSysVmMaxMapCount), + e); } } } catch (final IOException e) { - logger.warn("I/O exception while trying to read [{}]", e, path); + logger.warn((Supplier) () -> new ParameterizedMessage("I/O exception while trying to read [{}]", path), e); } return -1; } diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java b/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java index de6a0339cb0..fa9b1530477 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java @@ -116,4 +116,5 @@ class Elasticsearch extends SettingCommand { static void close(String[] args) throws IOException { Bootstrap.stop(); } + } diff --git a/core/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java b/core/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java index 45d54ed4a62..b1df4f5ccc0 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java +++ 
b/core/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java @@ -19,9 +19,10 @@ package org.elasticsearch.bootstrap; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.MergePolicy; import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import java.io.IOError; @@ -76,14 +77,17 @@ class ElasticsearchUncaughtExceptionHandler implements Thread.UncaughtExceptionH // visible for testing void onFatalUncaught(final String threadName, final Throwable t) { - final ESLogger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get()); - logger.error("fatal error in thread [{}], exiting", t, threadName); + final Logger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get()); + logger.error( + (org.apache.logging.log4j.util.Supplier) + () -> new ParameterizedMessage("fatal error in thread [{}], exiting", threadName), t); } // visible for testing void onNonFatalUncaught(final String threadName, final Throwable t) { - final ESLogger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get()); - logger.warn("uncaught exception in thread [{}]", t, threadName); + final Logger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get()); + logger.warn((org.apache.logging.log4j.util.Supplier) + () -> new ParameterizedMessage("uncaught exception in thread [{}]", threadName), t); } // visible for testing diff --git a/core/src/main/java/org/elasticsearch/bootstrap/JNACLibrary.java b/core/src/main/java/org/elasticsearch/bootstrap/JNACLibrary.java index 5d1369b21f7..fe0f400698f 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/JNACLibrary.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/JNACLibrary.java @@ -22,8 +22,8 @@ package org.elasticsearch.bootstrap; import com.sun.jna.Native; import com.sun.jna.NativeLong; import com.sun.jna.Structure; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.Constants; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import java.util.Arrays; @@ -34,7 +34,7 @@ import java.util.List; */ final class JNACLibrary { - private static final ESLogger logger = Loggers.getLogger(JNACLibrary.class); + private static final Logger logger = Loggers.getLogger(JNACLibrary.class); public static final int MCL_CURRENT = 1; public static final int ENOMEM = 12; diff --git a/core/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java b/core/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java index 50dab6888b6..747ba2e458f 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java @@ -25,8 +25,8 @@ import com.sun.jna.NativeLong; import com.sun.jna.Pointer; import com.sun.jna.Structure; import com.sun.jna.win32.StdCallLibrary; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.Constants; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import java.util.ArrayList; @@ -40,7 +40,7 @@ import java.util.List; */ final class JNAKernel32Library { - private static final ESLogger logger = Loggers.getLogger(JNAKernel32Library.class); + private static final 
Logger logger = Loggers.getLogger(JNAKernel32Library.class); // Callbacks must be kept around in order to be able to be called later, // when the Windows ConsoleCtrlHandler sends an event. diff --git a/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java b/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java index 5a8693b3137..5f3e357ff5f 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java @@ -21,8 +21,8 @@ package org.elasticsearch.bootstrap; import com.sun.jna.Native; import com.sun.jna.Pointer; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.Constants; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.monitor.jvm.JvmInfo; @@ -39,7 +39,7 @@ class JNANatives { /** no instantiation */ private JNANatives() {} - private static final ESLogger logger = Loggers.getLogger(JNANatives.class); + private static final Logger logger = Loggers.getLogger(JNANatives.class); // Set to true, in case native mlockall call was successful static boolean LOCAL_MLOCKALL = false; diff --git a/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java b/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java index 3f77f6bcee0..4a0aaec68c2 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java @@ -19,10 +19,10 @@ package org.elasticsearch.bootstrap; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.PathUtils; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import java.io.IOException; @@ -76,7 +76,7 @@ public class JarHell { */ public static void checkJarHell() throws Exception { ClassLoader loader = JarHell.class.getClassLoader(); - ESLogger logger = Loggers.getLogger(JarHell.class); + Logger logger = Loggers.getLogger(JarHell.class); if (logger.isDebugEnabled()) { logger.debug("java.class.path: {}", System.getProperty("java.class.path")); logger.debug("sun.boot.class.path: {}", System.getProperty("sun.boot.class.path")); @@ -86,7 +86,7 @@ public class JarHell { } checkJarHell(parseClassPath()); } - + /** * Parses the classpath into an array of URLs * @return array of URLs @@ -150,7 +150,7 @@ public class JarHell { */ @SuppressForbidden(reason = "needs JarFile for speed, just reading entries") public static void checkJarHell(URL urls[]) throws Exception { - ESLogger logger = Loggers.getLogger(JarHell.class); + Logger logger = Loggers.getLogger(JarHell.class); // we don't try to be sneaky and use deprecated/internal/not portable stuff // like sun.boot.class.path, and with jigsaw we don't yet have a way to get // a "list" at all. 
So just exclude any elements underneath the java home @@ -168,7 +168,7 @@ public class JarHell { if (path.toString().endsWith(".jar")) { if (!seenJars.add(path)) { logger.debug("excluding duplicate classpath element: {}", path); - continue; // we can't fail because of sheistiness with joda-time + continue; } logger.debug("examining jar: {}", path); try (JarFile file = new JarFile(path.toString())) { @@ -271,11 +271,13 @@ public class JarHell { "class: " + clazz + System.lineSeparator() + "exists multiple times in jar: " + jarpath + " !!!!!!!!!"); } else { - if (clazz.startsWith("org.apache.log4j")) { - return; // go figure, jar hell for what should be System.out.println... - } - if (clazz.equals("org.joda.time.base.BaseDateTime")) { - return; // apparently this is intentional... clean this up + if (clazz.startsWith("org.apache.logging.log4j.core.impl.ThrowableProxy")) { + /* + * deliberate to hack around a bug in Log4j + * cf. https://github.com/elastic/elasticsearch/issues/20304 + * cf. https://issues.apache.org/jira/browse/LOG4J2-1560 + */ + return; } throw new IllegalStateException("jar hell!" + System.lineSeparator() + "class: " + clazz + System.lineSeparator() + diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Natives.java b/core/src/main/java/org/elasticsearch/bootstrap/Natives.java index 6dba1f3a1bf..9fad34e329f 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Natives.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Natives.java @@ -19,7 +19,7 @@ package org.elasticsearch.bootstrap; -import org.elasticsearch.common.logging.ESLogger; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.Loggers; import java.nio.file.Path; @@ -32,7 +32,7 @@ final class Natives { /** no instantiation */ private Natives() {} - private static final ESLogger logger = Loggers.getLogger(Natives.class); + private static final Logger logger = Loggers.getLogger(Natives.class); // marker to determine if the JNA class files are available to the JVM static final boolean JNA_AVAILABLE; diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Seccomp.java b/core/src/main/java/org/elasticsearch/bootstrap/Seccomp.java index 6f6c3dc557b..88c618d445c 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Seccomp.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Seccomp.java @@ -26,9 +26,9 @@ import com.sun.jna.NativeLong; import com.sun.jna.Pointer; import com.sun.jna.Structure; import com.sun.jna.ptr.PointerByReference; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.Constants; import org.apache.lucene.util.IOUtils; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import java.io.IOException; @@ -92,7 +92,7 @@ import java.util.Map; */ // not an example of how to write code!!! 
final class Seccomp { - private static final ESLogger logger = Loggers.getLogger(Seccomp.class); + private static final Logger logger = Loggers.getLogger(Seccomp.class); // Linux implementation, based on seccomp(2) or prctl(2) with bpf filtering diff --git a/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java b/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java index 7bc0f546483..18c2d15ec39 100644 --- a/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java +++ b/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java @@ -20,7 +20,8 @@ package org.elasticsearch.client.transport; import com.carrotsearch.hppc.cursors.ObjectCursor; - +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -32,9 +33,8 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.client.Requests; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -43,11 +43,11 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.FutureTransportResponseHandler; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; import java.io.Closeable; @@ -340,7 +340,7 @@ public class TransportClientNodesService extends AbstractComponent implements Cl transportService.connectToNode(node); } catch (Exception e) { it.remove(); - logger.debug("failed to connect to discovered node [{}]", e, node); + logger.debug((Supplier) () -> new ParameterizedMessage("failed to connect to discovered node [{}]", node), e); } } } @@ -377,7 +377,9 @@ public class TransportClientNodesService extends AbstractComponent implements Cl logger.trace("connecting to listed node (light) [{}]", listedNode); transportService.connectToNodeLight(listedNode); } catch (Exception e) { - logger.debug("failed to connect to node [{}], removed from nodes list", e, listedNode); + logger.debug( + (Supplier) + () -> new ParameterizedMessage("failed to connect to node [{}], removed from nodes list", listedNode), e); newFilteredNodes.add(listedNode); continue; } @@ -409,7 +411,8 @@ public class TransportClientNodesService extends AbstractComponent implements Cl newNodes.add(listedNode); } } catch (Exception e) { - logger.info("failed to get node info for {}, disconnecting...", e, listedNode); + logger.info( + (Supplier) () -> new ParameterizedMessage("failed to get node info for {}, disconnecting...", listedNode), 
e); transportService.disconnectFromNode(listedNode); } } @@ -453,7 +456,9 @@ public class TransportClientNodesService extends AbstractComponent implements Cl transportService.connectToNodeLight(listedNode); } } catch (Exception e) { - logger.debug("failed to connect to node [{}], ignoring...", e, listedNode); + logger.debug( + (Supplier) + () -> new ParameterizedMessage("failed to connect to node [{}], ignoring...", listedNode), e); latch.countDown(); return; } @@ -482,13 +487,17 @@ public class TransportClientNodesService extends AbstractComponent implements Cl @Override public void handleException(TransportException e) { - logger.info("failed to get local cluster state for {}, disconnecting...", e, listedNode); + logger.info( + (Supplier) () -> new ParameterizedMessage( + "failed to get local cluster state for {}, disconnecting...", listedNode), e); transportService.disconnectFromNode(listedNode); latch.countDown(); } }); } catch (Exception e) { - logger.info("failed to get local cluster state info for {}, disconnecting...", e, listedNode); + logger.info( + (Supplier)() -> new ParameterizedMessage( + "failed to get local cluster state info for {}, disconnecting...", listedNode), e); transportService.disconnectFromNode(listedNode); latch.countDown(); } diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java index 2b00c575374..4e582cb32ca 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.action.index.NodeMappingRefreshAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; @@ -44,17 +45,16 @@ import org.elasticsearch.cluster.routing.allocation.decider.ConcurrentRebalanceA import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.NodeVersionAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.RebalanceOnlyWhenActiveAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider; -import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java 
b/core/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java index 228ac3f41b9..e18ec5543d9 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java @@ -19,10 +19,10 @@ package org.elasticsearch.cluster; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -35,7 +35,7 @@ import java.util.concurrent.atomic.AtomicReference; */ public class ClusterStateObserver { - protected final ESLogger logger; + protected final Logger logger; public final ChangePredicate MATCH_ALL_CHANGES_PREDICATE = new EventPredicate() { @@ -58,7 +58,7 @@ public class ClusterStateObserver { volatile boolean timedOut; - public ClusterStateObserver(ClusterService clusterService, ESLogger logger, ThreadContext contextHolder) { + public ClusterStateObserver(ClusterService clusterService, Logger logger, ThreadContext contextHolder) { this(clusterService, new TimeValue(60000), logger, contextHolder); } @@ -67,7 +67,7 @@ public class ClusterStateObserver { * will fail any existing or new #waitForNextChange calls. Set to null * to wait indefinitely */ - public ClusterStateObserver(ClusterService clusterService, @Nullable TimeValue timeout, ESLogger logger, ThreadContext contextHolder) { + public ClusterStateObserver(ClusterService clusterService, @Nullable TimeValue timeout, Logger logger, ThreadContext contextHolder) { this.clusterService = clusterService; this.lastObservedState = new AtomicReference<>(new ObservedState(clusterService.state())); this.timeOutValue = timeout; diff --git a/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java index 534f007e8b2..c2cdaf90bc4 100644 --- a/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java +++ b/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; @@ -39,7 +40,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -379,7 +379,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu return clusterInfo; } - static void buildShardLevelInfo(ESLogger logger, ShardStats[] stats, ImmutableOpenMap.Builder newShardSizes, + static void buildShardLevelInfo(Logger logger, ShardStats[] stats, ImmutableOpenMap.Builder newShardSizes, ImmutableOpenMap.Builder newShardRoutingToDataPath, ClusterState state) { MetaData meta = state.getMetaData(); for (ShardStats s : stats) { @@ -402,7 +402,7 @@ public class InternalClusterInfoService extends AbstractComponent implements 
Clu } } - static void fillDiskUsagePerNode(ESLogger logger, List nodeStatsArray, + static void fillDiskUsagePerNode(Logger logger, List nodeStatsArray, ImmutableOpenMap.Builder newLeastAvaiableUsages, ImmutableOpenMap.Builder newMostAvaiableUsages) { for (NodeStats nodeStats : nodeStatsArray) { diff --git a/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java b/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java index a487bda0db4..99f161b9da5 100644 --- a/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java +++ b/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.cluster; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; @@ -91,7 +93,7 @@ public class NodeConnectionsService extends AbstractLifecycleComponent { try { transportService.disconnectFromNode(node); } catch (Exception e) { - logger.warn("failed to disconnect to node [{}]", e, node); + logger.warn((Supplier) () -> new ParameterizedMessage("failed to disconnect to node [{}]", node), e); } } } @@ -113,7 +115,11 @@ public class NodeConnectionsService extends AbstractLifecycleComponent { nodeFailureCount = nodeFailureCount + 1; // log every 6th failure if ((nodeFailureCount % 6) == 1) { - logger.warn("failed to connect to node {} (tried [{}] times)", e, node, nodeFailureCount); + final int finalNodeFailureCount = nodeFailureCount; + logger.warn( + (Supplier) + () -> new ParameterizedMessage( + "failed to connect to node {} (tried [{}] times)", node, finalNodeFailureCount), e); } nodes.put(node, nodeFailureCount); } diff --git a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index c21d10382a8..13aa148f8b1 100644 --- a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -19,6 +19,9 @@ package org.elasticsearch.cluster.action.shard; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -43,7 +46,6 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.Discovery; @@ -108,7 +110,7 @@ public class ShardStateAction extends AbstractComponent { if (isMasterChannelException(exp)) { waitForNewMasterAndRetry(actionName, observer, shardEntry, listener); } else { - logger.warn("{} unexpected failure while sending request [{}] to [{}] for shard entry [{}]", exp, shardEntry.shardId, actionName, masterNode, shardEntry); + logger.warn((Supplier) () -> new ParameterizedMessage("{} unexpected failure while sending request [{}] to [{}] for shard entry [{}]", shardEntry.shardId, 
actionName, masterNode, shardEntry), exp); listener.onFailure(exp instanceof RemoteTransportException ? (Exception) (exp.getCause() instanceof Exception ? exp.getCause() : new ElasticsearchException(exp.getCause())) : exp); } } @@ -169,7 +171,7 @@ public class ShardStateAction extends AbstractComponent { @Override public void onClusterServiceClose() { - logger.warn("{} node closed while execution action [{}] for shard entry [{}]", shardEntry.failure, shardEntry.shardId, actionName, shardEntry); + logger.warn((Supplier) () -> new ParameterizedMessage("{} node closed while execution action [{}] for shard entry [{}]", shardEntry.shardId, actionName, shardEntry), shardEntry.failure); listener.onFailure(new NodeClosedException(clusterService.localNode())); } @@ -184,9 +186,9 @@ public class ShardStateAction extends AbstractComponent { private static class ShardFailedTransportHandler implements TransportRequestHandler { private final ClusterService clusterService; private final ShardFailedClusterStateTaskExecutor shardFailedClusterStateTaskExecutor; - private final ESLogger logger; + private final Logger logger; - public ShardFailedTransportHandler(ClusterService clusterService, ShardFailedClusterStateTaskExecutor shardFailedClusterStateTaskExecutor, ESLogger logger) { + public ShardFailedTransportHandler(ClusterService clusterService, ShardFailedClusterStateTaskExecutor shardFailedClusterStateTaskExecutor, Logger logger) { this.clusterService = clusterService; this.shardFailedClusterStateTaskExecutor = shardFailedClusterStateTaskExecutor; this.logger = logger; @@ -194,7 +196,7 @@ public class ShardStateAction extends AbstractComponent { @Override public void messageReceived(ShardEntry request, TransportChannel channel) throws Exception { - logger.warn("{} received shard failed for {}", request.failure, request.shardId, request); + logger.warn((Supplier) () -> new ParameterizedMessage("{} received shard failed for {}", request.shardId, request), request.failure); clusterService.submitStateUpdateTask( "shard-failed", request, @@ -203,12 +205,12 @@ public class ShardStateAction extends AbstractComponent { new ClusterStateTaskListener() { @Override public void onFailure(String source, Exception e) { - logger.error("{} unexpected failure while failing shard [{}]", e, request.shardId, request); + logger.error((Supplier) () -> new ParameterizedMessage("{} unexpected failure while failing shard [{}]", request.shardId, request), e); try { channel.sendResponse(e); } catch (Exception channelException) { channelException.addSuppressed(e); - logger.warn("{} failed to send failure [{}] while failing shard [{}]", channelException, request.shardId, e, request); + logger.warn((Supplier) () -> new ParameterizedMessage("{} failed to send failure [{}] while failing shard [{}]", request.shardId, e, request), channelException); } } @@ -218,7 +220,7 @@ public class ShardStateAction extends AbstractComponent { try { channel.sendResponse(new NotMasterException(source)); } catch (Exception channelException) { - logger.warn("{} failed to send no longer master while failing shard [{}]", channelException, request.shardId, request); + logger.warn((Supplier) () -> new ParameterizedMessage("{} failed to send no longer master while failing shard [{}]", request.shardId, request), channelException); } } @@ -227,7 +229,7 @@ public class ShardStateAction extends AbstractComponent { try { channel.sendResponse(TransportResponse.Empty.INSTANCE); } catch (Exception channelException) { - logger.warn("{} failed to send response while 
failing shard [{}]", channelException, request.shardId, request); + logger.warn((Supplier) () -> new ParameterizedMessage("{} failed to send response while failing shard [{}]", request.shardId, request), channelException); } } } @@ -238,9 +240,9 @@ public class ShardStateAction extends AbstractComponent { public static class ShardFailedClusterStateTaskExecutor implements ClusterStateTaskExecutor { private final AllocationService allocationService; private final RoutingService routingService; - private final ESLogger logger; + private final Logger logger; - public ShardFailedClusterStateTaskExecutor(AllocationService allocationService, RoutingService routingService, ESLogger logger) { + public ShardFailedClusterStateTaskExecutor(AllocationService allocationService, RoutingService routingService, Logger logger) { this.allocationService = allocationService; this.routingService = routingService; this.logger = logger; @@ -315,7 +317,7 @@ public class ShardStateAction extends AbstractComponent { } batchResultBuilder.successes(tasksToBeApplied); } catch (Exception e) { - logger.warn("failed to apply failed shards {}", e, shardRoutingsToBeApplied); + logger.warn((Supplier) () -> new ParameterizedMessage("failed to apply failed shards {}", shardRoutingsToBeApplied), e); // failures are communicated back to the requester // cluster state will not be updated in this case batchResultBuilder.failures(tasksToBeApplied, e); @@ -352,9 +354,9 @@ public class ShardStateAction extends AbstractComponent { private static class ShardStartedTransportHandler implements TransportRequestHandler { private final ClusterService clusterService; private final ShardStartedClusterStateTaskExecutor shardStartedClusterStateTaskExecutor; - private final ESLogger logger; + private final Logger logger; - public ShardStartedTransportHandler(ClusterService clusterService, ShardStartedClusterStateTaskExecutor shardStartedClusterStateTaskExecutor, ESLogger logger) { + public ShardStartedTransportHandler(ClusterService clusterService, ShardStartedClusterStateTaskExecutor shardStartedClusterStateTaskExecutor, Logger logger) { this.clusterService = clusterService; this.shardStartedClusterStateTaskExecutor = shardStartedClusterStateTaskExecutor; this.logger = logger; @@ -375,9 +377,9 @@ public class ShardStateAction extends AbstractComponent { public static class ShardStartedClusterStateTaskExecutor implements ClusterStateTaskExecutor, ClusterStateTaskListener { private final AllocationService allocationService; - private final ESLogger logger; + private final Logger logger; - public ShardStartedClusterStateTaskExecutor(AllocationService allocationService, ESLogger logger) { + public ShardStartedClusterStateTaskExecutor(AllocationService allocationService, Logger logger) { this.allocationService = allocationService; this.logger = logger; } @@ -431,7 +433,7 @@ public class ShardStateAction extends AbstractComponent { } builder.successes(tasksToBeApplied); } catch (Exception e) { - logger.warn("failed to apply started shards {}", e, shardRoutingsToBeApplied); + logger.warn((Supplier) () -> new ParameterizedMessage("failed to apply started shards {}", shardRoutingsToBeApplied), e); builder.failures(tasksToBeApplied, e); } @@ -440,7 +442,7 @@ public class ShardStateAction extends AbstractComponent { @Override public void onFailure(String source, Exception e) { - logger.error("unexpected failure during [{}]", e, source); + logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); } } diff --git 
a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index 29bb55f8107..fd7e08fec31 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -22,6 +22,7 @@ package org.elasticsearch.cluster.metadata; import com.carrotsearch.hppc.ObjectHashSet; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.Diffable; @@ -38,7 +39,6 @@ import org.elasticsearch.common.collect.HppcMaps; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -758,7 +758,7 @@ public class MetaData implements Iterable, Diffable, Fr /** As of 2.0 we require units for time and byte-sized settings. This methods adds default units to any cluster settings that don't * specify a unit. */ - public static MetaData addDefaultUnitsIfNeeded(ESLogger logger, MetaData metaData) { + public static MetaData addDefaultUnitsIfNeeded(Logger logger, MetaData metaData) { Settings.Builder newPersistentSettings = null; for(Map.Entry ent : metaData.persistentSettings().getAsMap().entrySet()) { String settingName = ent.getKey(); diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 041850a0b27..8b3cbec0ebd 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -21,6 +21,8 @@ package org.elasticsearch.cluster.metadata; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; @@ -446,9 +448,9 @@ public class MetaDataCreateIndexService extends AbstractComponent { @Override public void onFailure(String source, Exception e) { if (e instanceof IndexAlreadyExistsException) { - logger.trace("[{}] failed to create", e, request.index()); + logger.trace((Supplier) () -> new ParameterizedMessage("[{}] failed to create", request.index()), e); } else { - logger.debug("[{}] failed to create", e, request.index()); + logger.debug((Supplier) () -> new ParameterizedMessage("[{}] failed to create", request.index()), e); } super.onFailure(source, e); } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java index 04631df57a0..c21454a09a0 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java @@ -20,7 +20,6 @@ package 
org.elasticsearch.cluster.metadata; import com.carrotsearch.hppc.cursors.ObjectCursor; - import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesClusterStateUpdateRequest; @@ -64,7 +63,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent { private final AliasValidator aliasValidator; private final NodeServicesProvider nodeServicesProvider; - + private final MetaDataDeleteIndexService deleteIndexService; @Inject diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java index c44fee0fb2b..8ce58637b15 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java @@ -20,6 +20,8 @@ package org.elasticsearch.cluster.metadata; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest; import org.elasticsearch.cluster.AckedClusterStateTaskListener; @@ -193,7 +195,7 @@ public class MetaDataMappingService extends AbstractComponent { } } } catch (Exception e) { - logger.warn("[{}] failed to refresh-mapping in cluster state", e, index); + logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to refresh-mapping in cluster state", index), e); } return dirty; } @@ -207,7 +209,7 @@ public class MetaDataMappingService extends AbstractComponent { refreshTask, ClusterStateTaskConfig.build(Priority.HIGH), refreshExecutor, - (source, e) -> logger.warn("failure during [{}]", e, source) + (source, e) -> logger.warn((Supplier) () -> new ParameterizedMessage("failure during [{}]", source), e) ); } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java index 4eeb4339518..553e3b73247 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java @@ -21,6 +21,7 @@ package org.elasticsearch.cluster.routing; import com.carrotsearch.hppc.ObjectIntHashMap; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -30,7 +31,6 @@ import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; @@ -452,7 +452,7 @@ public class RoutingNodes implements Iterable { * * @return the started shard */ - public ShardRouting startShard(ESLogger logger, ShardRouting initializingShard, RoutingChangesObserver routingChangesObserver) { + public ShardRouting startShard(Logger logger, ShardRouting initializingShard, RoutingChangesObserver routingChangesObserver) { ensureMutable(); ShardRouting startedShard = started(initializingShard); logger.trace("{} marked shard as 
started (routing: {})", initializingShard.shardId(), initializingShard); @@ -484,7 +484,7 @@ public class RoutingNodes implements Iterable { * - If shard is a (primary or replica) relocation target, this also clears the relocation information on the source shard. * */ - public void failShard(ESLogger logger, ShardRouting failedShard, UnassignedInfo unassignedInfo, IndexMetaData indexMetaData, + public void failShard(Logger logger, ShardRouting failedShard, UnassignedInfo unassignedInfo, IndexMetaData indexMetaData, RoutingChangesObserver routingChangesObserver) { ensureMutable(); assert failedShard.assignedToNode() : "only assigned shards can be failed"; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java index cfe48dd711e..8300d3e37fd 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java @@ -19,6 +19,8 @@ package org.elasticsearch.cluster.routing; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; @@ -113,16 +115,16 @@ public class RoutingService extends AbstractLifecycleComponent { rerouting.set(false); ClusterState state = clusterService.state(); if (logger.isTraceEnabled()) { - logger.error("unexpected failure during [{}], current state:\n{}", e, source, state.prettyPrint()); + logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}], current state:\n{}", source, state.prettyPrint()), e); } else { - logger.error("unexpected failure during [{}], current state version [{}]", e, source, state.version()); + logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}], current state version [{}]", source, state.version()), e); } } }); } catch (Exception e) { rerouting.set(false); ClusterState state = clusterService.state(); - logger.warn("failed to reroute routing table, current state:\n{}", e, state.prettyPrint()); + logger.warn((Supplier) () -> new ParameterizedMessage("failed to reroute routing table, current state:\n{}", state.prettyPrint()), e); } } } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java index 2ca165308b1..2d960ce0450 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java @@ -27,8 +27,8 @@ import org.elasticsearch.cluster.Diffable; import org.elasticsearch.cluster.DiffableUtils; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.common.Nullable; import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -88,6 +88,11 @@ public class RoutingTable implements Iterable, Diffable nodes = new HashMap<>(); private final RoutingAllocation allocation; private final RoutingNodes routingNodes; @@ -219,7 +219,7 @@ public class BalancedShardsAllocator extends AbstractComponent 
implements Shards private final MetaData metaData; private final float avgShardsPerNode; - public Balancer(ESLogger logger, RoutingAllocation allocation, WeightFunction weight, float threshold) { + public Balancer(Logger logger, RoutingAllocation allocation, WeightFunction weight, float threshold) { this.logger = logger; this.allocation = allocation; this.weight = weight; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index 30de708b1e1..b64b74cc9cb 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -78,11 +78,10 @@ public class DiskThresholdDecider extends AllocationDecider { * Returns the size of all shards that are currently being relocated to * the node, but may not be finished transferring yet. * - * If subtractShardsMovingAway is set then the size of shards moving away is subtracted from the total size - * of all shards + * If subtractShardsMovingAway is true then the size of shards moving away is subtracted from the total size of all shards */ static long sizeOfRelocatingShards(RoutingNode node, RoutingAllocation allocation, - boolean subtractShardsMovingAway, String dataPath) { + boolean subtractShardsMovingAway, String dataPath) { ClusterInfo clusterInfo = allocation.clusterInfo(); long totalSize = 0; for (ShardRouting routing : node.shardsWithState(ShardRoutingState.RELOCATING, ShardRoutingState.INITIALIZING)) { @@ -111,7 +110,9 @@ public class DiskThresholdDecider extends AllocationDecider { final double usedDiskThresholdLow = 100.0 - diskThresholdSettings.getFreeDiskThresholdLow(); final double usedDiskThresholdHigh = 100.0 - diskThresholdSettings.getFreeDiskThresholdHigh(); - DiskUsage usage = getDiskUsage(node, allocation, usages); + // subtractLeavingShards is passed as false here, because they still use disk space, and therefore we should be extra careful + // and take the size into account + DiskUsage usage = getDiskUsage(node, allocation, usages, false); // First, check that the node is currently over the low watermark double freeDiskPercentage = usage.getFreeDiskAsPercentage(); // Cache the used disk percentage for displaying disk percentages consistent with documentation @@ -243,7 +244,9 @@ public class DiskThresholdDecider extends AllocationDecider { return decision; } - final DiskUsage usage = getDiskUsage(node, allocation, usages); + // subtractLeavingShards is passed as true here, since this is only for shards remaining, we will *eventually* have enough disk + // since shards are moving away. No new shards will be incoming since in canAllocate we pass false for this check. + final DiskUsage usage = getDiskUsage(node, allocation, usages, true); final String dataPath = clusterInfo.getDataPath(shardRouting); // If this node is already above the high threshold, the shard cannot remain (get it off!)
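The pair of getDiskUsage call sites above is the substantive behavior change in this file: allocation and remain decisions now reason about relocating shards differently. A minimal sketch of that asymmetry, using simplified, hypothetical names rather than the real Elasticsearch signatures:

class DiskHeadroomSketch {
    // Hypothetical stand-ins for the ClusterInfo lookups used by the decider.
    long usedBytes;    // bytes currently used on the node
    long leavingBytes; // size of shards relocating away from the node

    // canAllocate path: subtractLeavingShards == false. An incoming shard must
    // fit right now, so shards that are still moving away count as used space.
    long usedForAllocation() {
        return usedBytes;
    }

    // canRemain path: subtractLeavingShards == true. A shard already on the node
    // may stay, because the leaving shards will eventually free their space.
    long usedForRemain() {
        return usedBytes - leavingBytes;
    }
}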
final double freeDiskPercentage = usage.getFreeDiskAsPercentage(); @@ -280,7 +283,8 @@ public class DiskThresholdDecider extends AllocationDecider { "there is enough disk on this node for the shard to remain, free: [%s]", new ByteSizeValue(freeBytes)); } - private DiskUsage getDiskUsage(RoutingNode node, RoutingAllocation allocation, ImmutableOpenMap usages) { + private DiskUsage getDiskUsage(RoutingNode node, RoutingAllocation allocation, + ImmutableOpenMap usages, boolean subtractLeavingShards) { DiskUsage usage = usages.get(node.nodeId()); if (usage == null) { // If there is no usage, and we have other nodes in the cluster, @@ -293,7 +297,7 @@ public class DiskThresholdDecider extends AllocationDecider { } if (diskThresholdSettings.includeRelocations()) { - long relocatingShardsSize = sizeOfRelocatingShards(node, allocation, true, usage.getPath()); + long relocatingShardsSize = sizeOfRelocatingShards(node, allocation, subtractLeavingShards, usage.getPath()); DiskUsage usageIncludingRelocations = new DiskUsage(node.nodeId(), node.node().getName(), usage.getPath(), usage.getTotalBytes(), usage.getFreeBytes() - relocatingShardsSize); if (logger.isTraceEnabled()) { diff --git a/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java index 21e2defd9b0..e981313f402 100644 --- a/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java +++ b/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java @@ -19,6 +19,9 @@ package org.elasticsearch.cluster.service; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.AckedClusterStateTaskListener; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; @@ -43,7 +46,6 @@ import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; @@ -554,9 +556,16 @@ public class ClusterService extends AbstractLifecycleComponent { } catch (Exception e) { TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS))); if (logger.isTraceEnabled()) { - logger.trace("failed to execute cluster state update in [{}], state:\nversion [{}], source [{}]\n{}{}{}", e, executionTime, - previousClusterState.version(), tasksSummary, previousClusterState.nodes().prettyPrint(), - previousClusterState.routingTable().prettyPrint(), previousClusterState.getRoutingNodes().prettyPrint()); + logger.trace( + (Supplier) () -> new ParameterizedMessage( + "failed to execute cluster state update in [{}], state:\nversion [{}], source [{}]\n{}{}{}", + executionTime, + previousClusterState.version(), + tasksSummary, + previousClusterState.nodes().prettyPrint(), + previousClusterState.routingTable().prettyPrint(), + previousClusterState.getRoutingNodes().prettyPrint()), + e); } warnAboutSlowTaskIfNeeded(executionTime, tasksSummary); batchResult = ClusterStateTaskExecutor.BatchResult.builder() @@ -587,7 +596,9 @@ public class ClusterService extends AbstractLifecycleComponent { 
executionResult.handle( () -> proccessedListeners.add(updateTask), ex -> { - logger.debug("cluster state update task {} failed", ex, updateTask.toString(executor)); + logger.debug( + (Supplier) + () -> new ParameterizedMessage("cluster state update task {} failed", updateTask.toString(executor)), ex); updateTask.listener.onFailure(updateTask.source, ex); } ); @@ -670,7 +681,11 @@ public class ClusterService extends AbstractLifecycleComponent { try { clusterStatePublisher.accept(clusterChangedEvent, ackListener); } catch (Discovery.FailedToCommitClusterStateException t) { - logger.warn("failing [{}]: failed to commit cluster state version [{}]", t, tasksSummary, newClusterState.version()); + final long version = newClusterState.version(); + logger.warn( + (Supplier) () -> new ParameterizedMessage( + "failing [{}]: failed to commit cluster state version [{}]", tasksSummary, version), + t); proccessedListeners.forEach(task -> task.listener.onFailure(task.source, t)); return; } @@ -713,7 +728,10 @@ public class ClusterService extends AbstractLifecycleComponent { try { ackListener.onNodeAck(newClusterState.nodes().getLocalNode(), null); } catch (Exception e) { - logger.debug("error while processing ack for master node [{}]", e, newClusterState.nodes().getLocalNode()); + final DiscoveryNode localNode = newClusterState.nodes().getLocalNode(); + logger.debug( + (Supplier) () -> new ParameterizedMessage("error while processing ack for master node [{}]", localNode), + e); } } @@ -724,7 +742,11 @@ public class ClusterService extends AbstractLifecycleComponent { try { executor.clusterStatePublished(clusterChangedEvent); } catch (Exception e) { - logger.error("exception thrown while notifying executor of new cluster state publication [{}]", e, tasksSummary); + logger.error( + (Supplier) () -> new ParameterizedMessage( + "exception thrown while notifying executor of new cluster state publication [{}]", + tasksSummary), + e); } TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS))); @@ -733,8 +755,18 @@ public class ClusterService extends AbstractLifecycleComponent { warnAboutSlowTaskIfNeeded(executionTime, tasksSummary); } catch (Exception e) { TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS))); - logger.warn("failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}", e, executionTime, - newClusterState.version(), newClusterState.stateUUID(), tasksSummary, newClusterState.prettyPrint()); + final long version = newClusterState.version(); + final String stateUUID = newClusterState.stateUUID(); + final String prettyPrint = newClusterState.prettyPrint(); + logger.warn( + (Supplier) () -> new ParameterizedMessage( + "failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}", + executionTime, + version, + stateUUID, + tasksSummary, + prettyPrint), + e); // TODO: do we want to call updateTask.onFailure here? 
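Every logging rewrite in these hunks follows the same mechanical recipe: ESLogger accepted (message, Throwable, params...), whereas the Log4j 2 Logger takes the Throwable last and benefits from building the message lazily. Where the message references a mutated local (as with nodeFailureCount earlier), the diff first copies it into a final variable, since lambdas may only capture effectively final locals. A minimal sketch of the idiom, assuming nothing beyond the Log4j 2 classes this patch already imports:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

class LazyLogSketch {
    private static final Logger logger = LogManager.getLogger(LazyLogSketch.class);

    void onFailure(String source, Exception e) {
        // Old ESLogger form: logger.warn("failure during [{}]", e, source);
        // Log4j 2 form: the Supplier defers formatting the ParameterizedMessage
        // until WARN is actually enabled, and the exception moves to the end.
        logger.warn((Supplier) () -> new ParameterizedMessage("failure during [{}]", source), e);
    }
}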
} @@ -743,7 +775,7 @@ public class ClusterService extends AbstractLifecycleComponent { // this one is overridden in tests so we can control time protected long currentTimeInNanos() {return System.nanoTime();} - private static SafeClusterStateTaskListener safe(ClusterStateTaskListener listener, ESLogger logger) { + private static SafeClusterStateTaskListener safe(ClusterStateTaskListener listener, Logger logger) { if (listener instanceof AckedClusterStateTaskListener) { return new SafeAckedClusterStateTaskListener((AckedClusterStateTaskListener) listener, logger); } else { @@ -753,9 +785,9 @@ public class ClusterService extends AbstractLifecycleComponent { private static class SafeClusterStateTaskListener implements ClusterStateTaskListener { private final ClusterStateTaskListener listener; - private final ESLogger logger; + private final Logger logger; - public SafeClusterStateTaskListener(ClusterStateTaskListener listener, ESLogger logger) { + public SafeClusterStateTaskListener(ClusterStateTaskListener listener, Logger logger) { this.listener = listener; this.logger = logger; } @@ -766,7 +798,9 @@ public class ClusterService extends AbstractLifecycleComponent { listener.onFailure(source, e); } catch (Exception inner) { inner.addSuppressed(e); - logger.error("exception thrown by listener notifying of failure from [{}]", inner, source); + logger.error( + (Supplier) () -> new ParameterizedMessage( + "exception thrown by listener notifying of failure from [{}]", source), inner); } } @@ -775,7 +809,9 @@ public class ClusterService extends AbstractLifecycleComponent { try { listener.onNoLongerMaster(source); } catch (Exception e) { - logger.error("exception thrown by listener while notifying no longer master from [{}]", e, source); + logger.error( + (Supplier) () -> new ParameterizedMessage( + "exception thrown by listener while notifying no longer master from [{}]", source), e); } } @@ -785,21 +821,22 @@ public class ClusterService extends AbstractLifecycleComponent { listener.clusterStateProcessed(source, oldState, newState); } catch (Exception e) { logger.error( + (Supplier) () -> new ParameterizedMessage( "exception thrown by listener while notifying of cluster state processed from [{}], old cluster state:\n" + - "{}\nnew cluster state:\n{}", - e, + "{}\nnew cluster state:\n{}", source, oldState.prettyPrint(), - newState.prettyPrint()); + newState.prettyPrint()), + e); } } } private static class SafeAckedClusterStateTaskListener extends SafeClusterStateTaskListener implements AckedClusterStateTaskListener { private final AckedClusterStateTaskListener listener; - private final ESLogger logger; + private final Logger logger; - public SafeAckedClusterStateTaskListener(AckedClusterStateTaskListener listener, ESLogger logger) { + public SafeAckedClusterStateTaskListener(AckedClusterStateTaskListener listener, Logger logger) { super(listener, logger); this.listener = listener; this.logger = logger; @@ -996,7 +1033,7 @@ public class ClusterService extends AbstractLifecycleComponent { private static class AckCountDownListener implements Discovery.AckListener { - private static final ESLogger logger = Loggers.getLogger(AckCountDownListener.class); + private static final Logger logger = Loggers.getLogger(AckCountDownListener.class); private final AckedClusterStateTaskListener ackedTaskListener; private final CountDown countDown; @@ -1040,7 +1077,10 @@ public class ClusterService extends AbstractLifecycleComponent { logger.trace("ack received from node [{}], cluster_state update (version: {})", node, 
clusterStateVersion); } else { this.lastFailure = e; - logger.debug("ack received from node [{}], cluster_state update (version: {})", e, node, clusterStateVersion); + logger.debug( + (Supplier) () -> new ParameterizedMessage( + "ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion), + e); } if (countDown.countDown()) { diff --git a/core/src/main/java/org/elasticsearch/common/SuppressLoggerChecks.java b/core/src/main/java/org/elasticsearch/common/SuppressLoggerChecks.java index c6f23f72f96..dc6a88447b7 100644 --- a/core/src/main/java/org/elasticsearch/common/SuppressLoggerChecks.java +++ b/core/src/main/java/org/elasticsearch/common/SuppressLoggerChecks.java @@ -16,13 +16,14 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.common; +package org.elasticsearch.common; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; + /** * Annotation to suppress logging usage checks errors inside a whole class or a method. */ diff --git a/core/src/main/java/org/elasticsearch/common/breaker/ChildMemoryCircuitBreaker.java b/core/src/main/java/org/elasticsearch/common/breaker/ChildMemoryCircuitBreaker.java index e6c67dbe9ba..68bf52e9e0d 100644 --- a/core/src/main/java/org/elasticsearch/common/breaker/ChildMemoryCircuitBreaker.java +++ b/core/src/main/java/org/elasticsearch/common/breaker/ChildMemoryCircuitBreaker.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.breaker; -import org.elasticsearch.common.logging.ESLogger; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.indices.breaker.BreakerSettings; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; @@ -36,7 +36,7 @@ public class ChildMemoryCircuitBreaker implements CircuitBreaker { private final double overheadConstant; private final AtomicLong used; private final AtomicLong trippedCount; - private final ESLogger logger; + private final Logger logger; private final HierarchyCircuitBreakerService parent; private final String name; @@ -48,7 +48,7 @@ public class ChildMemoryCircuitBreaker implements CircuitBreaker { * @param parent parent circuit breaker service to delegate tripped breakers to * @param name the name of the breaker */ - public ChildMemoryCircuitBreaker(BreakerSettings settings, ESLogger logger, + public ChildMemoryCircuitBreaker(BreakerSettings settings, Logger logger, HierarchyCircuitBreakerService parent, String name) { this(settings, null, logger, parent, name); } @@ -64,7 +64,7 @@ public class ChildMemoryCircuitBreaker implements CircuitBreaker { * @param oldBreaker the previous circuit breaker to inherit the used value from (starting offset) */ public ChildMemoryCircuitBreaker(BreakerSettings settings, ChildMemoryCircuitBreaker oldBreaker, - ESLogger logger, HierarchyCircuitBreakerService parent, String name) { + Logger logger, HierarchyCircuitBreakerService parent, String name) { this.name = name; this.settings = settings; this.memoryBytesLimit = settings.getLimit(); diff --git a/core/src/main/java/org/elasticsearch/common/breaker/MemoryCircuitBreaker.java b/core/src/main/java/org/elasticsearch/common/breaker/MemoryCircuitBreaker.java index 7dbdd7d6a6e..3ac4a52994d 100644 --- a/core/src/main/java/org/elasticsearch/common/breaker/MemoryCircuitBreaker.java +++ 
b/core/src/main/java/org/elasticsearch/common/breaker/MemoryCircuitBreaker.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.common.breaker; -import org.elasticsearch.common.logging.ESLogger; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.unit.ByteSizeValue; import java.util.concurrent.atomic.AtomicLong; @@ -33,7 +33,7 @@ public class MemoryCircuitBreaker implements CircuitBreaker { private final double overheadConstant; private final AtomicLong used; private final AtomicLong trippedCount; - private final ESLogger logger; + private final Logger logger; /** @@ -43,7 +43,7 @@ public class MemoryCircuitBreaker implements CircuitBreaker { * @param limit circuit breaker limit * @param overheadConstant constant multiplier for byte estimations */ - public MemoryCircuitBreaker(ByteSizeValue limit, double overheadConstant, ESLogger logger) { + public MemoryCircuitBreaker(ByteSizeValue limit, double overheadConstant, Logger logger) { this(limit, overheadConstant, null, logger); } @@ -56,7 +56,7 @@ public class MemoryCircuitBreaker implements CircuitBreaker { * @param overheadConstant constant multiplier for byte estimations * @param oldBreaker the previous circuit breaker to inherit the used value from (starting offset) */ - public MemoryCircuitBreaker(ByteSizeValue limit, double overheadConstant, MemoryCircuitBreaker oldBreaker, ESLogger logger) { + public MemoryCircuitBreaker(ByteSizeValue limit, double overheadConstant, MemoryCircuitBreaker oldBreaker, Logger logger) { this.memoryBytesLimit = limit.bytes(); this.overheadConstant = overheadConstant; if (oldBreaker == null) { diff --git a/core/src/main/java/org/elasticsearch/common/component/AbstractComponent.java b/core/src/main/java/org/elasticsearch/common/component/AbstractComponent.java index fa49a80123d..8cb51f2b06b 100644 --- a/core/src/main/java/org/elasticsearch/common/component/AbstractComponent.java +++ b/core/src/main/java/org/elasticsearch/common/component/AbstractComponent.java @@ -19,19 +19,17 @@ package org.elasticsearch.common.component; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.node.Node; -/** - * - */ public abstract class AbstractComponent { - protected final ESLogger logger; + protected final Logger logger; protected final DeprecationLogger deprecationLogger; protected final Settings settings; @@ -42,7 +40,7 @@ public abstract class AbstractComponent { } public AbstractComponent(Settings settings, Class customClass) { - this.logger = Loggers.getLogger(customClass, settings); + this.logger = LogManager.getLogger(customClass); this.deprecationLogger = new DeprecationLogger(logger); this.settings = settings; } @@ -71,4 +69,5 @@ public abstract class AbstractComponent { deprecationLogger.deprecated("Setting [{}] has been removed, use [{}] instead", settingName, alternativeName); } } + } diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java index 84d02eb4ec7..cb2f8bb4e78 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java @@ -19,20 +19,15 @@ package 
org.elasticsearch.common.geo.builders; -import org.locationtech.spatial4j.context.jts.JtsSpatialContext; -import org.locationtech.spatial4j.exception.InvalidShapeException; -import org.locationtech.spatial4j.shape.Shape; -import org.locationtech.spatial4j.shape.jts.JtsGeometry; import com.vividsolutions.jts.geom.Coordinate; import com.vividsolutions.jts.geom.Geometry; import com.vividsolutions.jts.geom.GeometryFactory; - +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.support.ToXContentToBytes; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.unit.DistanceUnit.Distance; import org.elasticsearch.common.xcontent.ToXContent; @@ -40,6 +35,10 @@ import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.GeoShapeFieldMapper; +import org.locationtech.spatial4j.context.jts.JtsSpatialContext; +import org.locationtech.spatial4j.exception.InvalidShapeException; +import org.locationtech.spatial4j.shape.Shape; +import org.locationtech.spatial4j.shape.jts.JtsGeometry; import java.io.IOException; import java.util.ArrayList; @@ -53,7 +52,7 @@ import java.util.Locale; */ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWriteable { - protected static final ESLogger LOGGER = ESLoggerFactory.getLogger(ShapeBuilder.class.getName()); + protected static final Logger LOGGER = ESLoggerFactory.getLogger(ShapeBuilder.class.getName()); private static final boolean DEBUG; static { diff --git a/core/src/main/java/org/elasticsearch/common/inject/spi/Elements.java b/core/src/main/java/org/elasticsearch/common/inject/spi/Elements.java index 30911def551..bfb084dd478 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/spi/Elements.java +++ b/core/src/main/java/org/elasticsearch/common/inject/spi/Elements.java @@ -16,6 +16,7 @@ package org.elasticsearch.common.inject.spi; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.Binder; import org.elasticsearch.common.inject.Binding; @@ -40,7 +41,6 @@ import org.elasticsearch.common.inject.internal.PrivateElementsImpl; import org.elasticsearch.common.inject.internal.ProviderMethodsModule; import org.elasticsearch.common.inject.internal.SourceProvider; import org.elasticsearch.common.inject.matcher.Matcher; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import java.lang.annotation.Annotation; @@ -351,7 +351,7 @@ public final class Elements { return builder; } - private static ESLogger logger = Loggers.getLogger(Elements.class); + private static Logger logger = Loggers.getLogger(Elements.class); protected Object getSource() { Object ret; diff --git a/core/src/main/java/org/elasticsearch/common/io/FileSystemUtils.java b/core/src/main/java/org/elasticsearch/common/io/FileSystemUtils.java index 46175c0f664..943368059d1 100644 --- a/core/src/main/java/org/elasticsearch/common/io/FileSystemUtils.java +++ b/core/src/main/java/org/elasticsearch/common/io/FileSystemUtils.java @@ -19,8 +19,8 @@ package org.elasticsearch.common.io; +import 
org.apache.logging.log4j.Logger; import org.apache.lucene.util.IOUtils; -import org.elasticsearch.common.logging.ESLogger; import java.io.BufferedReader; import java.io.IOException; @@ -101,7 +101,7 @@ public final class FileSystemUtils { * Check that a directory exists, is a directory and is readable * by the current user */ - public static boolean isAccessibleDirectory(Path directory, ESLogger logger) { + public static boolean isAccessibleDirectory(Path directory, Logger logger) { assert directory != null && logger != null; if (!Files.exists(directory)) { diff --git a/core/src/main/java/org/elasticsearch/common/logging/ConsoleAppender.java b/core/src/main/java/org/elasticsearch/common/logging/ConsoleAppender.java deleted file mode 100644 index 7c33389974f..00000000000 --- a/core/src/main/java/org/elasticsearch/common/logging/ConsoleAppender.java +++ /dev/null @@ -1,260 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.logging; - -import org.apache.log4j.Layout; -import org.apache.log4j.WriterAppender; -import org.apache.log4j.helpers.LogLog; -import org.elasticsearch.common.SuppressForbidden; - -import java.io.IOException; -import java.io.OutputStream; - -/** - * ConsoleAppender appends log events to System.out or - * System.err using a layout specified by the user. The - * default target is System.out. - *
Elasticsearch: Adapter from log4j to allow to disable console logging...
- * - * @author Ceki Gülcü - * @author Curt Arnold - * @since 1.1 - */ -public class ConsoleAppender extends WriterAppender { - - public static final String SYSTEM_OUT = "System.out"; - public static final String SYSTEM_ERR = "System.err"; - - protected String target = SYSTEM_OUT; - - /** - * Determines if the appender honors reassignments of System.out - * or System.err made after configuration. - */ - private boolean follow = true; - - /** - * Constructs an unconfigured appender. - */ - public ConsoleAppender() { - } - - /** - * Creates a configured appender. - * - * @param layout layout, may not be null. - */ - public ConsoleAppender(Layout layout) { - this(layout, SYSTEM_OUT); - } - - /** - * Creates a configured appender. - * - * @param layout layout, may not be null. - * @param target target, either "System.err" or "System.out". - */ - public ConsoleAppender(Layout layout, String target) { - setLayout(layout); - setTarget(target); - activateOptions(); - } - - /** - * Sets the value of the Target option. Recognized values - * are "System.out" and "System.err". Any other value will be - * ignored. - */ - public void setTarget(String value) { - String v = value.trim(); - - if (SYSTEM_OUT.equalsIgnoreCase(v)) { - target = SYSTEM_OUT; - } else if (SYSTEM_ERR.equalsIgnoreCase(v)) { - target = SYSTEM_ERR; - } else { - targetWarn(value); - } - } - - /** - * Returns the current value of the Target property. The - * default value of the option is "System.out". - *
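What made this appender worth carrying in the first place shows up in the stream classes deleted below: every write is gated on Loggers.consoleLoggingEnabled(), so console output can be switched off at runtime without reconfiguring log4j. The gate, reduced to a sketch (the boolean field is a local stand-in for that method; everything else is simplified):

import java.io.IOException;
import java.io.OutputStream;

class GatedOutputStream extends OutputStream {
    private final OutputStream target;
    private volatile boolean consoleLoggingEnabled = true; // stand-in for Loggers.consoleLoggingEnabled()

    GatedOutputStream(OutputStream target) {
        this.target = target;
    }

    @Override
    public void write(int b) throws IOException {
        if (consoleLoggingEnabled) {
            target.write(b); // otherwise drop silently, as the deleted streams did
        }
    }
}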
- * See also {@link #setTarget}. - */ - public String getTarget() { - return target; - } - - /** - * Sets whether the appender honors reassignments of System.out - * or System.err made after configuration. - * - * @param newValue if true, appender will use value of System.out or - * System.err in force at the time when logging events are appended. - * @since 1.2.13 - */ - public final void setFollow(final boolean newValue) { - follow = newValue; - } - - /** - * Gets whether the appender honors reassignments of System.out - * or System.err made after configuration. - * - * @return true if appender will use value of System.out or - * System.err in force at the time when logging events are appended. - * @since 1.2.13 - */ - public final boolean getFollow() { - return follow; - } - - void targetWarn(String val) { - LogLog.warn("[" + val + "] should be System.out or System.err."); - LogLog.warn("Using previously set target, System.out by default."); - } - - /** - * Prepares the appender for use. - */ - @Override - @SuppressForbidden(reason = "System#out") - public void activateOptions() { - if (follow) { - if (target.equals(SYSTEM_ERR)) { - setWriter(createWriter(new SystemErrStream())); - } else { - setWriter(createWriter(new SystemOutStream())); - } - } else { - if (target.equals(SYSTEM_ERR)) { - setWriter(createWriter(System.err)); - } else { - setWriter(createWriter(System.out)); - } - } - - super.activateOptions(); - } - - /** - * {@inheritDoc} - */ - @Override - protected - final void closeWriter() { - if (follow) { - super.closeWriter(); - } - } - - - /** - * An implementation of OutputStream that redirects to the - * current System.err. - */ - @SuppressForbidden(reason = "System#err") - private static class SystemErrStream extends OutputStream { - public SystemErrStream() { - } - - @Override - public void close() { - } - - @Override - public void flush() { - System.err.flush(); - } - - @Override - public void write(final byte[] b) throws IOException { - if (!Loggers.consoleLoggingEnabled()) { - return; - } - System.err.write(b); - } - - @Override - - public void write(final byte[] b, final int off, final int len) - throws IOException { - if (!Loggers.consoleLoggingEnabled()) { - return; - } - System.err.write(b, off, len); - } - - @Override - public void write(final int b) throws IOException { - if (!Loggers.consoleLoggingEnabled()) { - return; - } - System.err.write(b); - } - } - - /** - * An implementation of OutputStream that redirects to the - * current System.out. 
- */ - @SuppressForbidden(reason = "System#err") - private static class SystemOutStream extends OutputStream { - public SystemOutStream() { - } - - @Override - public void close() { - } - - @Override - public void flush() { - System.out.flush(); - } - - @Override - public void write(final byte[] b) throws IOException { - if (!Loggers.consoleLoggingEnabled()) { - return; - } - System.out.write(b); - } - - @Override - public void write(final byte[] b, final int off, final int len) - throws IOException { - if (!Loggers.consoleLoggingEnabled()) { - return; - } - System.out.write(b, off, len); - } - - @Override - public void write(final int b) throws IOException { - if (!Loggers.consoleLoggingEnabled()) { - return; - } - System.out.write(b); - } - } - -} diff --git a/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java b/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java index afcef98fef7..d9b811585de 100644 --- a/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java +++ b/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java @@ -19,6 +19,9 @@ package org.elasticsearch.common.logging; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.common.SuppressLoggerChecks; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -31,6 +34,8 @@ import java.util.concurrent.CopyOnWriteArraySet; */ public class DeprecationLogger { + private final Logger logger; + /** * The "Warning" Header comes from RFC-7234. As the RFC describes, it's generally used for caching purposes, but it can be * used for any warning. @@ -84,22 +89,20 @@ public class DeprecationLogger { } } - private final ESLogger logger; - /** * Creates a new deprecation logger based on the parent logger. Automatically * prefixes the logger name with "deprecation", if it starts with "org.elasticsearch.", * it replaces "org.elasticsearch" with "org.elasticsearch.deprecation" to maintain * the "org.elasticsearch" namespace. */ - public DeprecationLogger(ESLogger parentLogger) { + public DeprecationLogger(Logger parentLogger) { String name = parentLogger.getName(); if (name.startsWith("org.elasticsearch")) { name = name.replace("org.elasticsearch.", "org.elasticsearch.deprecation."); } else { name = "deprecation." + name; } - this.logger = ESLoggerFactory.getLogger(parentLogger.getPrefix(), name); + this.logger = LogManager.getLogger(name, parentLogger.getMessageFactory()); } /** @@ -113,29 +116,27 @@ public class DeprecationLogger { * Logs a deprecated message to the deprecation log, as well as to the local {@link ThreadContext}. * * @param threadContexts The node's {@link ThreadContext} (outside of concurrent tests, this should only ever have one context). - * @param msg The deprecation message. + * @param message The deprecation message. * @param params The parameters used to fill in the message, if any exist. */ @SuppressLoggerChecks(reason = "safely delegates to logger") - void deprecated(Set threadContexts, String msg, Object... params) { + void deprecated(Set threadContexts, String message, Object... 
params) { Iterator iterator = threadContexts.iterator(); if (iterator.hasNext()) { - final String formattedMsg = LoggerMessageFormat.format(msg, params); + final String formattedMessage = LoggerMessageFormat.format(message, params); while (iterator.hasNext()) { try { - iterator.next().addResponseHeader(DEPRECATION_HEADER, formattedMsg); + iterator.next().addResponseHeader(DEPRECATION_HEADER, formattedMessage); } catch (IllegalStateException e) { // ignored; it should be removed shortly } } - - logger.warn(formattedMsg); + logger.warn(formattedMessage); } else { - logger.warn(msg, params); + logger.warn(message, params); } - } } diff --git a/core/src/main/java/org/elasticsearch/common/logging/ESLogger.java b/core/src/main/java/org/elasticsearch/common/logging/ESLogger.java deleted file mode 100644 index b2a2aa333c7..00000000000 --- a/core/src/main/java/org/elasticsearch/common/logging/ESLogger.java +++ /dev/null @@ -1,208 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.logging; - -import org.apache.log4j.Level; -import org.apache.log4j.Logger; -import org.elasticsearch.common.SuppressLoggerChecks; - -import static org.elasticsearch.common.logging.LoggerMessageFormat.format; - -/** - * Elasticsearch's logger wrapper. - */ -@SuppressLoggerChecks(reason = "safely delegates to itself") -public class ESLogger { - private static final String FQCN = ESLogger.class.getName(); - - private final String prefix; - private final Logger logger; - - public ESLogger(String prefix, Logger logger) { - this.prefix = prefix; - this.logger = logger; - } - - /** - * The prefix of the log. - */ - public String getPrefix() { - return this.prefix; - } - - /** - * Fetch the underlying logger so we can look at it. Only exists for testing. - */ - Logger getLogger() { - return logger; - } - - /** - * Set the level of the logger. If the new level is null, the logger will inherit it's level from its nearest ancestor with a non-null - * level. - */ - public void setLevel(String level) { - if (level == null) { - logger.setLevel(null); - } else if ("error".equalsIgnoreCase(level)) { - logger.setLevel(Level.ERROR); - } else if ("warn".equalsIgnoreCase(level)) { - logger.setLevel(Level.WARN); - } else if ("info".equalsIgnoreCase(level)) { - logger.setLevel(Level.INFO); - } else if ("debug".equalsIgnoreCase(level)) { - logger.setLevel(Level.DEBUG); - } else if ("trace".equalsIgnoreCase(level)) { - logger.setLevel(Level.TRACE); - } - } - - /** - * The level of this logger. If null then the logger is inheriting it's level from its nearest ancestor with a non-null level. - */ - public String getLevel() { - if (logger.getLevel() == null) { - return null; - } - return logger.getLevel().toString(); - } - - /** - * The name of this logger. 
- */ - public String getName() { - return logger.getName(); - } - - /** - * Returns {@code true} if a TRACE level message should be logged. - */ - public boolean isTraceEnabled() { - return logger.isTraceEnabled(); - } - - /** - * Returns {@code true} if a DEBUG level message should be logged. - */ - public boolean isDebugEnabled() { - return logger.isDebugEnabled(); - } - - /** - * Returns {@code true} if an INFO level message should be logged. - */ - public boolean isInfoEnabled() { - return logger.isInfoEnabled(); - } - - /** - * Returns {@code true} if a WARN level message should be logged. - */ - public boolean isWarnEnabled() { - return logger.isEnabledFor(Level.WARN); - } - - /** - * Returns {@code true} if an ERROR level message should be logged. - */ - public boolean isErrorEnabled() { - return logger.isEnabledFor(Level.ERROR); - } - - /** - * Logs a TRACE level message. - */ - public void trace(String msg, Object... params) { - trace(msg, null, params); - } - - /** - * Logs a TRACE level message with an exception. - */ - public void trace(String msg, Throwable cause, Object... params) { - if (isTraceEnabled()) { - logger.log(FQCN, Level.TRACE, format(prefix, msg, params), cause); - } - } - - /** - * Logs a DEBUG level message. - */ - public void debug(String msg, Object... params) { - debug(msg, null, params); - } - - /** - * Logs a DEBUG level message with an exception. - */ - public void debug(String msg, Throwable cause, Object... params) { - if (isDebugEnabled()) { - logger.log(FQCN, Level.DEBUG, format(prefix, msg, params), cause); - } - } - - /** - * Logs a INFO level message. - */ - public void info(String msg, Object... params) { - info(msg, null, params); - } - - /** - * Logs a INFO level message with an exception. - */ - public void info(String msg, Throwable cause, Object... params) { - if (isInfoEnabled()) { - logger.log(FQCN, Level.INFO, format(prefix, msg, params), cause); - } - } - - /** - * Logs a WARN level message. - */ - public void warn(String msg, Object... params) { - warn(msg, null, params); - } - - /** - * Logs a WARN level message with an exception. - */ - public void warn(String msg, Throwable cause, Object... params) { - if (isWarnEnabled()) { - logger.log(FQCN, Level.WARN, format(prefix, msg, params), cause); - } - } - - /** - * Logs a ERROR level message. - */ - public void error(String msg, Object... params) { - error(msg, null, params); - } - - /** - * Logs a ERROR level message with an exception. - */ - public void error(String msg, Throwable cause, Object... 
params) { - if (isErrorEnabled()) { - logger.log(FQCN, Level.ERROR, format(prefix, msg, params), cause); - } - } -} diff --git a/core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java b/core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java index c0951c47df1..853df3d31ad 100644 --- a/core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java +++ b/core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java @@ -19,31 +19,46 @@ package org.elasticsearch.common.logging; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.MessageFactory; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import java.util.Locale; +import java.util.function.Function; /** - * Factory to get {@link ESLogger}s + * Factory to get {@link Logger}s */ public abstract class ESLoggerFactory { - public static final Setting LOG_DEFAULT_LEVEL_SETTING = - new Setting<>("logger.level", LogLevel.INFO.name(), LogLevel::parse, Property.NodeScope); - public static final Setting LOG_LEVEL_SETTING = - Setting.prefixKeySetting("logger.", LogLevel.INFO.name(), LogLevel::parse, + public static final Setting LOG_DEFAULT_LEVEL_SETTING = + new Setting<>("logger.level", Level.INFO.name(), Level::valueOf, Property.NodeScope); + public static final Setting LOG_LEVEL_SETTING = + Setting.prefixKeySetting("logger.", Level.INFO.name(), Level::valueOf, Property.Dynamic, Property.NodeScope); - public static ESLogger getLogger(String prefix, String name) { - prefix = prefix == null ? null : prefix.intern(); + public static Logger getLogger(String prefix, String name) { name = name.intern(); - return new ESLogger(prefix, Logger.getLogger(name)); + final Logger logger = getLogger(new PrefixMessageFactory(), name); + final MessageFactory factory = logger.getMessageFactory(); + // in some cases, we initialize the logger before we are ready to set the prefix + // we can not re-initialize the logger, so the above getLogger might return an existing + // instance without the prefix set; thus, we hack around this by resetting the prefix + if (prefix != null && factory instanceof PrefixMessageFactory) { + ((PrefixMessageFactory) factory).setPrefix(prefix.intern()); + } + return logger; } - public static ESLogger getLogger(String name) { - return getLogger(null, name); + public static Logger getLogger(MessageFactory messageFactory, String name) { + return LogManager.getLogger(name, messageFactory); + } + + public static Logger getLogger(String name) { + return getLogger((String)null, name); } public static DeprecationLogger getDeprecationLogger(String name) { @@ -54,18 +69,12 @@ public abstract class ESLoggerFactory { return new DeprecationLogger(getLogger(prefix, name)); } - public static ESLogger getRootLogger() { - return new ESLogger(null, Logger.getRootLogger()); + public static Logger getRootLogger() { + return LogManager.getRootLogger(); } private ESLoggerFactory() { // Utility class can't be built. 
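The comment block inside getLogger(String, String) above is worth unpacking, because it describes a real Log4j 2 constraint: LogManager caches loggers by name, so a logger created before its prefix is known keeps the MessageFactory it was born with, and later getLogger calls hand back that same cached instance. A sketch of why the code mutates the factory in place (PrefixMessageFactory is the class added later in this diff; everything else is stock Log4j 2 API):

```java
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.MessageFactory;
import org.elasticsearch.common.logging.PrefixMessageFactory;

final class PrefixResetSketch {
    public static void main(String[] args) {
        // First lookup creates and caches the logger, before any prefix is known.
        Logger logger = LogManager.getLogger("org.elasticsearch.index.shard", new PrefixMessageFactory());

        // Later lookups by the same name return the cached instance together
        // with the factory it was created with; a different factory passed at
        // that point would be ignored. The only way to attach a prefix after
        // the fact is to mutate the factory already bound to the logger:
        MessageFactory factory = logger.getMessageFactory();
        if (factory instanceof PrefixMessageFactory) {
            ((PrefixMessageFactory) factory).setPrefix("[my-index][0] ");
        }
        logger.info("recovered"); // rendered as "[my-index][0] recovered"
    }
}
```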
} - public enum LogLevel { - WARN, TRACE, INFO, DEBUG, ERROR; - public static LogLevel parse(String level) { - return valueOf(level.toUpperCase(Locale.ROOT)); - } - } } diff --git a/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java b/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java index e203999d332..68e2886cf37 100644 --- a/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java +++ b/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java @@ -19,12 +19,21 @@ package org.elasticsearch.common.logging; -import org.apache.log4j.Java9Hack; -import org.apache.log4j.PropertyConfigurator; -import org.apache.lucene.util.Constants; -import org.elasticsearch.ElasticsearchException; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.core.LoggerContext; +import org.apache.logging.log4j.core.config.AbstractConfiguration; +import org.apache.logging.log4j.core.config.Configurator; +import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilder; +import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilderFactory; +import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration; +import org.apache.logging.log4j.core.config.composite.CompositeConfiguration; +import org.apache.logging.log4j.core.config.properties.PropertiesConfiguration; +import org.apache.logging.log4j.core.config.properties.PropertiesConfigurationFactory; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.env.Environment; import java.io.IOException; @@ -34,144 +43,82 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.SimpleFileVisitor; import java.nio.file.attribute.BasicFileAttributes; +import java.util.ArrayList; import java.util.Arrays; import java.util.EnumSet; -import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Properties; import java.util.Set; -import static java.util.Collections.unmodifiableMap; -import static org.elasticsearch.common.Strings.cleanPath; - -/** - * Configures log4j with a special set of replacements. 
- */ public class LogConfigurator { - static final List ALLOWED_SUFFIXES = Arrays.asList(".yml", ".yaml", ".json", ".properties"); + public static void configure(final Environment environment, final boolean resolveConfig) throws IOException { + final Settings settings = environment.settings(); - private static final Map REPLACEMENTS; - static { - Map replacements = new HashMap<>(); - // Appenders - replacements.put("async", "org.apache.log4j.AsyncAppender"); - replacements.put("console", ConsoleAppender.class.getName()); - replacements.put("dailyRollingFile", "org.apache.log4j.DailyRollingFileAppender"); - replacements.put("externallyRolledFile", "org.apache.log4j.ExternallyRolledFileAppender"); - replacements.put("extrasRollingFile", "org.apache.log4j.rolling.RollingFileAppender"); - replacements.put("file", "org.apache.log4j.FileAppender"); - replacements.put("jdbc", "org.apache.log4j.jdbc.JDBCAppender"); - replacements.put("jms", "org.apache.log4j.net.JMSAppender"); - replacements.put("lf5", "org.apache.log4j.lf5.LF5Appender"); - replacements.put("ntevent", "org.apache.log4j.nt.NTEventLogAppender"); - replacements.put("null", "org.apache.log4j.NullAppender"); - replacements.put("rollingFile", "org.apache.log4j.RollingFileAppender"); - replacements.put("smtp", "org.apache.log4j.net.SMTPAppender"); - replacements.put("socket", "org.apache.log4j.net.SocketAppender"); - replacements.put("socketHub", "org.apache.log4j.net.SocketHubAppender"); - replacements.put("syslog", "org.apache.log4j.net.SyslogAppender"); - replacements.put("telnet", "org.apache.log4j.net.TelnetAppender"); - replacements.put("terminal", TerminalAppender.class.getName()); + setLogConfigurationSystemProperty(environment, settings); - // Policies - replacements.put("timeBased", "org.apache.log4j.rolling.TimeBasedRollingPolicy"); - replacements.put("sizeBased", "org.apache.log4j.rolling.SizeBasedTriggeringPolicy"); + // we initialize the status logger immediately otherwise Log4j will complain when we try to get the context + final ConfigurationBuilder builder = ConfigurationBuilderFactory.newConfigurationBuilder(); + builder.setStatusLevel(Level.ERROR); + Configurator.initialize(builder.build()); - // Layouts - replacements.put("simple", "org.apache.log4j.SimpleLayout"); - replacements.put("html", "org.apache.log4j.HTMLLayout"); - replacements.put("pattern", "org.apache.log4j.PatternLayout"); - replacements.put("consolePattern", "org.apache.log4j.PatternLayout"); - replacements.put("enhancedPattern", "org.apache.log4j.EnhancedPatternLayout"); - replacements.put("ttcc", "org.apache.log4j.TTCCLayout"); - replacements.put("xml", "org.apache.log4j.XMLLayout"); - REPLACEMENTS = unmodifiableMap(replacements); + final LoggerContext context = (LoggerContext) LogManager.getContext(false); - if (Constants.JRE_IS_MINIMUM_JAVA9) { - Java9Hack.fixLog4j(); - } - } - - private static boolean loaded; - - /** - * Consolidates settings and converts them into actual log4j settings, then initializes loggers and appenders. - * @param settings custom settings that should be applied - * @param resolveConfig controls whether the logging conf file should be read too or not. - */ - public static void configure(Settings settings, boolean resolveConfig) { - if (loaded) { - return; - } - loaded = true; - // TODO: this is partly a copy of InternalSettingsPreparer...we should pass in Environment and not do all this... 
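Two details of the new configure method are easy to miss in the interleaved hunk here. First, Log4j 2's status logger complains if the LoggerContext is touched before any configuration exists, so a throwaway configuration whose only job is to raise the status level to ERROR is installed up front. Second, every log4j2.properties file found under the config directory is parsed into its own PropertiesConfiguration, and the results are merged through CompositeConfiguration. A condensed sketch of that ordering, under the assumption that the caller supplies the list produced by the directory walk shown below:

```java
import java.util.List;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.core.LoggerContext;
import org.apache.logging.log4j.core.config.AbstractConfiguration;
import org.apache.logging.log4j.core.config.Configurator;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilder;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilderFactory;
import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration;
import org.apache.logging.log4j.core.config.composite.CompositeConfiguration;

final class ConfigureOrderSketch {
    static void configure(List<AbstractConfiguration> discovered) {
        // Install a minimal configuration first: Log4j 2 prints status
        // warnings if the context is touched before any configuration exists.
        ConfigurationBuilder<BuiltConfiguration> builder = ConfigurationBuilderFactory.newConfigurationBuilder();
        builder.setStatusLevel(Level.ERROR);
        Configurator.initialize(builder.build());

        // Only now fetch the context, and swap in the merged view of every
        // log4j2.properties file found by the directory walk.
        LoggerContext context = (LoggerContext) LogManager.getContext(false);
        context.start(new CompositeConfiguration(discovered));
    }
}
```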
- Environment environment = new Environment(settings); - - Settings.Builder settingsBuilder = Settings.builder(); if (resolveConfig) { - resolveConfig(environment, settingsBuilder); - } - - // add custom settings after config was added so that they are not overwritten by config - settingsBuilder.put(settings); - settingsBuilder.replacePropertyPlaceholders(); - Properties props = new Properties(); - for (Map.Entry entry : settingsBuilder.build().getAsMap().entrySet()) { - String key = "log4j." + entry.getKey(); - String value = entry.getValue(); - value = REPLACEMENTS.getOrDefault(value, value); - if (key.endsWith(".value")) { - props.setProperty(key.substring(0, key.length() - ".value".length()), value); - } else if (key.endsWith(".type")) { - props.setProperty(key.substring(0, key.length() - ".type".length()), value); - } else { - props.setProperty(key, value); - } - } - // ensure explicit path to logs dir exists - props.setProperty("log4j.path.logs", cleanPath(environment.logsFile().toAbsolutePath().toString())); - PropertyConfigurator.configure(props); - } - - /** - * sets the loaded flag to false so that logging configuration can be - * overridden. Should only be used in tests. - */ - static void reset() { - loaded = false; - } - - static void resolveConfig(Environment env, final Settings.Builder settingsBuilder) { - - try { - Set options = EnumSet.of(FileVisitOption.FOLLOW_LINKS); - Files.walkFileTree(env.configFile(), options, Integer.MAX_VALUE, new SimpleFileVisitor() { + final List configurations = new ArrayList<>(); + final PropertiesConfigurationFactory factory = new PropertiesConfigurationFactory(); + final Set options = EnumSet.of(FileVisitOption.FOLLOW_LINKS); + Files.walkFileTree(environment.configFile(), options, Integer.MAX_VALUE, new SimpleFileVisitor() { @Override public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { - String fileName = file.getFileName().toString(); - if (fileName.startsWith("logging.")) { - for (String allowedSuffix : ALLOWED_SUFFIXES) { - if (fileName.endsWith(allowedSuffix)) { - loadConfig(file, settingsBuilder); - break; - } - } + if (file.getFileName().toString().equals("log4j2.properties")) { + configurations.add((PropertiesConfiguration) factory.getConfiguration(file.toString(), file.toUri())); } return FileVisitResult.CONTINUE; } }); - } catch (IOException ioe) { - throw new ElasticsearchException("Failed to load logging configuration", ioe); + context.start(new CompositeConfiguration(configurations)); + warnIfOldConfigurationFilePresent(environment); + } + + if (ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.exists(settings)) { + Loggers.setLevel(ESLoggerFactory.getRootLogger(), ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.get(settings)); + } + + final Map levels = settings.filter(ESLoggerFactory.LOG_LEVEL_SETTING::match).getAsMap(); + for (String key : levels.keySet()) { + final Level level = ESLoggerFactory.LOG_LEVEL_SETTING.getConcreteSetting(key).get(settings); + Loggers.setLevel(Loggers.getLogger(key.substring("logger.".length())), level); } } - static void loadConfig(Path file, Settings.Builder settingsBuilder) { - try { - settingsBuilder.loadFromPath(file); - } catch (IOException | SettingsException | NoClassDefFoundError e) { - // ignore - } + private static void warnIfOldConfigurationFilePresent(final Environment environment) throws IOException { + // TODO: the warning for unsupported logging configurations can be removed in 6.0.0 + assert Version.CURRENT.major < 6; + final List suffixes = 
Arrays.asList(".yml", ".yaml", ".json", ".properties"); + final Set options = EnumSet.of(FileVisitOption.FOLLOW_LINKS); + Files.walkFileTree(environment.configFile(), options, Integer.MAX_VALUE, new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + final String fileName = file.getFileName().toString(); + if (fileName.startsWith("logging")) { + for (final String suffix : suffixes) { + if (fileName.endsWith(suffix)) { + Loggers.getLogger(LogConfigurator.class).warn( + "ignoring unsupported logging configuration file [{}], logging is configured via [{}]", + file.toString(), + file.getParent().resolve("log4j2.properties")); + } + } + } + return FileVisitResult.CONTINUE; + } + }); } + + @SuppressForbidden(reason = "sets system property for logging configuration") + private static void setLogConfigurationSystemProperty(final Environment environment, final Settings settings) { + System.setProperty("es.logs", environment.logsFile().resolve(ClusterName.CLUSTER_NAME_SETTING.get(settings).value()).toString()); + } + } diff --git a/core/src/main/java/org/elasticsearch/common/logging/Loggers.java b/core/src/main/java/org/elasticsearch/common/logging/Loggers.java index 4a938e38a2e..ddca741389c 100644 --- a/core/src/main/java/org/elasticsearch/common/logging/Loggers.java +++ b/core/src/main/java/org/elasticsearch/common/logging/Loggers.java @@ -19,15 +19,19 @@ package org.elasticsearch.common.logging; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.LoggerContext; +import org.apache.logging.log4j.core.config.Configuration; +import org.apache.logging.log4j.core.config.Configurator; +import org.apache.logging.log4j.core.config.LoggerConfig; +import org.apache.logging.log4j.message.MessageFactory; import org.elasticsearch.common.Classes; -import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.node.Node; -import java.net.InetAddress; -import java.net.UnknownHostException; import java.util.ArrayList; import java.util.List; @@ -57,51 +61,29 @@ public class Loggers { return consoleLoggingEnabled; } - public static ESLogger getLogger(Class clazz, Settings settings, ShardId shardId, String... prefixes) { + public static Logger getLogger(Class clazz, Settings settings, ShardId shardId, String... prefixes) { return getLogger(clazz, settings, shardId.getIndex(), asArrayList(Integer.toString(shardId.id()), prefixes).toArray(new String[0])); } /** - * Just like {@link #getLogger(Class, org.elasticsearch.common.settings.Settings,ShardId,String...)} but String loggerName instead of + * Just like {@link #getLogger(Class, org.elasticsearch.common.settings.Settings, ShardId, String...)} but String loggerName instead of * Class. */ - public static ESLogger getLogger(String loggerName, Settings settings, ShardId shardId, String... prefixes) { + public static Logger getLogger(String loggerName, Settings settings, ShardId shardId, String... prefixes) { return getLogger(loggerName, settings, - asArrayList(shardId.getIndexName(), Integer.toString(shardId.id()), prefixes).toArray(new String[0])); + asArrayList(shardId.getIndexName(), Integer.toString(shardId.id()), prefixes).toArray(new String[0])); } - public static ESLogger getLogger(Class clazz, Settings settings, Index index, String... 
prefixes) { + public static Logger getLogger(Class clazz, Settings settings, Index index, String... prefixes) { return getLogger(clazz, settings, asArrayList(SPACE, index.getName(), prefixes).toArray(new String[0])); } - public static ESLogger getLogger(Class clazz, Settings settings, String... prefixes) { + public static Logger getLogger(Class clazz, Settings settings, String... prefixes) { return getLogger(buildClassLoggerName(clazz), settings, prefixes); } - @SuppressForbidden(reason = "using localhost for logging on which host it is is fine") - private static InetAddress getHostAddress() { - try { - return InetAddress.getLocalHost(); - } catch (UnknownHostException e) { - return null; - } - } - - @SuppressForbidden(reason = "do not know what this method does") - public static ESLogger getLogger(String loggerName, Settings settings, String... prefixes) { + public static Logger getLogger(String loggerName, Settings settings, String... prefixes) { List prefixesList = new ArrayList<>(); - if (settings.getAsBoolean("logger.logHostAddress", false)) { - final InetAddress addr = getHostAddress(); - if (addr != null) { - prefixesList.add(addr.getHostAddress()); - } - } - if (settings.getAsBoolean("logger.logHostName", false)) { - final InetAddress addr = getHostAddress(); - if (addr != null) { - prefixesList.add(addr.getHostName()); - } - } if (Node.NODE_NAME_SETTING.exists(settings)) { prefixesList.add(Node.NODE_NAME_SETTING.get(settings)); } @@ -111,23 +93,23 @@ public class Loggers { return getLogger(getLoggerName(loggerName), prefixesList.toArray(new String[prefixesList.size()])); } - public static ESLogger getLogger(ESLogger parentLogger, String s) { - return ESLoggerFactory.getLogger(parentLogger.getPrefix(), getLoggerName(parentLogger.getName() + s)); + public static Logger getLogger(Logger parentLogger, String s) { + return ESLoggerFactory.getLogger(parentLogger.getMessageFactory(), getLoggerName(parentLogger.getName() + s)); } - public static ESLogger getLogger(String s) { + public static Logger getLogger(String s) { return ESLoggerFactory.getLogger(getLoggerName(s)); } - public static ESLogger getLogger(Class clazz) { + public static Logger getLogger(Class clazz) { return ESLoggerFactory.getLogger(getLoggerName(buildClassLoggerName(clazz))); } - public static ESLogger getLogger(Class clazz, String... prefixes) { + public static Logger getLogger(Class clazz, String... prefixes) { return getLogger(buildClassLoggerName(clazz), prefixes); } - public static ESLogger getLogger(String name, String... prefixes) { + public static Logger getLogger(String name, String... prefixes) { String prefix = null; if (prefixes != null && prefixes.length > 0) { StringBuilder sb = new StringBuilder(); @@ -148,6 +130,32 @@ public class Loggers { return ESLoggerFactory.getLogger(prefix, getLoggerName(name)); } + /** + * Set the level of the logger. If the new level is null, the logger will inherit its level from its nearest ancestor with a non-null + * level.
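For orientation on the prefix plumbing above (the middle of the prefix-assembling method is elided by the hunk context): callers never hand prefixes to Log4j directly. Loggers collects them (node name, index name, shard id), folds them into a single prefix string, and passes that to ESLoggerFactory, which binds it to the logger's PrefixMessageFactory. A hedged usage sketch; the class name and the exact rendered bracketing are assumptions for illustration:

```java
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.ShardId;

final class PrefixUsageSketch {
    // Hypothetical shard-scoped component; the overloads are the ones above.
    static Logger shardLogger(Settings settings, ShardId shardId) {
        // With node.name=node-1, index "my-index" and shard 0, messages come
        // out prefixed along the lines of "[node-1] [my-index][0] ..."; the
        // exact bracket assembly lives in the elided middle of the method.
        return Loggers.getLogger(PrefixUsageSketch.class, settings, shardId);
    }
}
```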
+ */ + public static void setLevel(Logger logger, String level) { + final Level l; + if (level == null) { + l = null; + } else { + l = Level.valueOf(level); + } + setLevel(logger, l); + } + + public static void setLevel(Logger logger, Level level) { + if (!"".equals(logger.getName())) { + Configurator.setLevel(logger.getName(), level); + } else { + LoggerContext ctx = LoggerContext.getContext(false); + Configuration config = ctx.getConfiguration(); + LoggerConfig loggerConfig = config.getLoggerConfig(logger.getName()); + loggerConfig.setLevel(level); + ctx.updateLoggers(); + } + } + private static String buildClassLoggerName(Class clazz) { String name = clazz.getName(); if (name.startsWith("org.elasticsearch.")) { @@ -162,4 +170,5 @@ public class Loggers { } return commonPrefix + name; } + } diff --git a/core/src/main/java/org/elasticsearch/common/logging/PrefixMessageFactory.java b/core/src/main/java/org/elasticsearch/common/logging/PrefixMessageFactory.java new file mode 100644 index 00000000000..a141ceb75aa --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/logging/PrefixMessageFactory.java @@ -0,0 +1,221 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
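The new setLevel helper just above branches on the logger's name because the root logger's name is the empty string: named loggers go through Configurator.setLevel, while the root logger's LoggerConfig is updated directly and the context refreshed. A usage sketch against the classes in this diff:

```java
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.logging.Loggers;

final class SetLevelSketch {
    static void demo() {
        Logger discovery = Loggers.getLogger("org.elasticsearch.discovery");
        // Named logger: the string is parsed with Level.valueOf and applied
        // through Configurator.setLevel.
        Loggers.setLevel(discovery, "trace");
        // Root logger: its name is the empty string, so it takes the
        // LoggerConfig branch above and the context is updated explicitly.
        Loggers.setLevel(ESLoggerFactory.getRootLogger(), Level.WARN);
        // Per the javadoc above, null restores level inheritance from the
        // nearest ancestor with a non-null level.
        Loggers.setLevel(discovery, (String) null);
    }
}
```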
+ */ + +package org.elasticsearch.common.logging; + +import org.apache.logging.log4j.message.Message; +import org.apache.logging.log4j.message.MessageFactory2; +import org.apache.logging.log4j.message.ObjectMessage; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.message.SimpleMessage; + +public class PrefixMessageFactory implements MessageFactory2 { + + private String prefix = ""; + + public String getPrefix() { + return prefix; + } + + public void setPrefix(String prefix) { + this.prefix = prefix; + } + + @Override + public Message newMessage(Object message) { + return new PrefixObjectMessage(prefix, message); + } + + private static class PrefixObjectMessage extends ObjectMessage { + + private final String prefix; + private final Object object; + private String prefixObjectString; + + private PrefixObjectMessage(String prefix, Object object) { + super(object); + this.prefix = prefix; + this.object = object; + } + + @Override + public String getFormattedMessage() { + if (prefixObjectString == null) { + prefixObjectString = prefix + super.getFormattedMessage(); + } + return prefixObjectString; + } + + @Override + public void formatTo(StringBuilder buffer) { + buffer.append(prefix); + super.formatTo(buffer); + } + + @Override + public Object[] getParameters() { + return new Object[]{prefix, object}; + } + + } + + @Override + public Message newMessage(String message) { + return new PrefixSimpleMessage(prefix, message); + } + + private static class PrefixSimpleMessage extends SimpleMessage { + + private final String prefix; + private String prefixMessage; + + PrefixSimpleMessage(String prefix, String message) { + super(message); + this.prefix = prefix; + } + + PrefixSimpleMessage(String prefix, CharSequence charSequence) { + super(charSequence); + this.prefix = prefix; + } + + @Override + public String getFormattedMessage() { + if (prefixMessage == null) { + prefixMessage = prefix + super.getFormattedMessage(); + } + return prefixMessage; + } + + @Override + public void formatTo(StringBuilder buffer) { + buffer.append(prefix); + super.formatTo(buffer); + } + + @Override + public int length() { + return prefixMessage.length(); + } + + @Override + public char charAt(int index) { + return prefixMessage.charAt(index); + } + + @Override + public CharSequence subSequence(int start, int end) { + return prefixMessage.subSequence(start, end); + } + + } + + @Override + public Message newMessage(String message, Object... params) { + return new PrefixParameterizedMessage(prefix, message, params); + } + + private static class PrefixParameterizedMessage extends ParameterizedMessage { + + private static ThreadLocal threadLocalStringBuilder = ThreadLocal.withInitial(StringBuilder::new); + + private final String prefix; + private String formattedMessage; + + private PrefixParameterizedMessage(String prefix, String messagePattern, Object... 
arguments) { + super(messagePattern, arguments); + this.prefix = prefix; + } + + @Override + public String getFormattedMessage() { + if (formattedMessage == null) { + final StringBuilder buffer = threadLocalStringBuilder.get(); + buffer.setLength(0); + formatTo(buffer); + formattedMessage = buffer.toString(); + } + return formattedMessage; + } + + @Override + public void formatTo(StringBuilder buffer) { + buffer.append(prefix); + super.formatTo(buffer); + } + + } + + @Override + public Message newMessage(CharSequence charSequence) { + return new PrefixSimpleMessage(prefix, charSequence); + } + + @Override + public Message newMessage(String message, Object p0) { + return new PrefixParameterizedMessage(prefix, message, p0); + } + + @Override + public Message newMessage(String message, Object p0, Object p1) { + return new PrefixParameterizedMessage(prefix, message, p0, p1); + } + + @Override + public Message newMessage(String message, Object p0, Object p1, Object p2) { + return new PrefixParameterizedMessage(prefix, message, p0, p1, p2); + } + + @Override + public Message newMessage(String message, Object p0, Object p1, Object p2, Object p3) { + return new PrefixParameterizedMessage(prefix, message, p0, p1, p2, p3); + } + + @Override + public Message newMessage(String message, Object p0, Object p1, Object p2, Object p3, Object p4) { + return new PrefixParameterizedMessage(prefix, message, p0, p1, p2, p3, p4); + } + + @Override + public Message newMessage(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5) { + return new PrefixParameterizedMessage(prefix, message, p0, p1, p2, p3, p4, p5); + } + + @Override + public Message newMessage(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6) { + return new PrefixParameterizedMessage(prefix, message, p0, p1, p2, p3, p4, p5, p6); + } + + @Override + public Message newMessage(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6, Object p7) { + return new PrefixParameterizedMessage(prefix, message, p0, p1, p2, p3, p4, p5, p6, p7); + } + + @Override + public Message newMessage( + String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6, Object p7, Object p8) { + return new PrefixParameterizedMessage(prefix, message, p0, p1, p2, p3, p4, p5, p6, p7, p8); + } + + @Override + public Message newMessage( + String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6, Object p7, Object p8, Object p9) { + return new PrefixParameterizedMessage(prefix, message, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9); + } +} diff --git a/core/src/main/java/org/elasticsearch/common/logging/TerminalAppender.java b/core/src/main/java/org/elasticsearch/common/logging/TerminalAppender.java deleted file mode 100644 index e967ad9d79e..00000000000 --- a/core/src/main/java/org/elasticsearch/common/logging/TerminalAppender.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
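Two design notes on the factory above. The many fixed-arity newMessage(String, Object, ...) overloads mirror MessageFactory2, which Log4j 2 introduced so hot call sites can avoid allocating varargs arrays. And each message type caches its prefixed rendering lazily in getFormattedMessage(); note that PrefixSimpleMessage's length(), charAt() and subSequence() read that cache, so they only work once the message has been formatted. A standalone sketch of the factory in use, outside ESLoggerFactory:

```java
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.PrefixMessageFactory;

public class PrefixDemo {
    public static void main(String[] args) {
        PrefixMessageFactory factory = new PrefixMessageFactory();
        factory.setPrefix("[node-1] ");
        Logger logger = LogManager.getLogger("prefix.demo", factory);
        // info(String, Object) goes through newMessage(String, Object) above,
        // producing a PrefixParameterizedMessage. Rendered as:
        // [node-1] indexed 42 docs
        logger.info("indexed {} docs", 42);
    }
}
```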
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - - -package org.elasticsearch.common.logging; - -import org.apache.log4j.AppenderSkeleton; -import org.apache.log4j.spi.LoggingEvent; -import org.elasticsearch.cli.Terminal; - -/** - * TerminalAppender logs event to Terminal.DEFAULT. It is used for example by the PluginCli. - * */ -public class TerminalAppender extends AppenderSkeleton { - @Override - protected void append(LoggingEvent event) { - Terminal.DEFAULT.println(event.getRenderedMessage()); - } - - @Override - public void close() { - } - - @Override - public boolean requiresLayout() { - return false; - } -} diff --git a/core/src/main/java/org/elasticsearch/common/lucene/LoggerInfoStream.java b/core/src/main/java/org/elasticsearch/common/lucene/LoggerInfoStream.java index 0000a138bd2..c4ef2ef8c70 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/LoggerInfoStream.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/LoggerInfoStream.java @@ -19,8 +19,8 @@ package org.elasticsearch.common.lucene; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.InfoStream; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; /** An InfoStream (for Lucene's IndexWriter) that redirects @@ -30,12 +30,12 @@ public final class LoggerInfoStream extends InfoStream { /** Used for component-specific logging: */ /** Logger for everything */ - private final ESLogger logger; + private final Logger logger; /** Logger for IndexFileDeleter */ - private final ESLogger ifdLogger; + private final Logger ifdLogger; - public LoggerInfoStream(ESLogger parentLogger) { + public LoggerInfoStream(Logger parentLogger) { logger = Loggers.getLogger(parentLogger, ".lucene.iw"); ifdLogger = Loggers.getLogger(parentLogger, ".lucene.iw.ifd"); } @@ -52,7 +52,7 @@ public final class LoggerInfoStream extends InfoStream { return getLogger(component).isTraceEnabled() && component.equals("TP") == false; } - private ESLogger getLogger(String component) { + private Logger getLogger(String component) { if (component.equals("IFD")) { return ifdLogger; } else { diff --git a/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java index d555e199d08..94e1f05e46b 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -19,6 +19,9 @@ package org.elasticsearch.common.lucene; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.analysis.core.KeywordAnalyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.codecs.CodecUtil; @@ -67,7 +70,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.util.iterable.Iterables; import 
org.elasticsearch.index.analysis.AnalyzerScope; import org.elasticsearch.index.analysis.NamedAnalyzer; @@ -104,14 +106,14 @@ public class Lucene { public static final TopDocs EMPTY_TOP_DOCS = new TopDocs(0, EMPTY_SCORE_DOCS, 0.0f); - public static Version parseVersion(@Nullable String version, Version defaultVersion, ESLogger logger) { + public static Version parseVersion(@Nullable String version, Version defaultVersion, Logger logger) { if (version == null) { return defaultVersion; } try { return Version.parse(version); } catch (ParseException e) { - logger.warn("no version match {}, default to {}", e, version, defaultVersion); + logger.warn((Supplier) () -> new ParameterizedMessage("no version match {}, default to {}", version, defaultVersion), e); return defaultVersion; } } diff --git a/core/src/main/java/org/elasticsearch/common/network/IfConfig.java b/core/src/main/java/org/elasticsearch/common/network/IfConfig.java index e32129c9386..7fd4cc6d2f3 100644 --- a/core/src/main/java/org/elasticsearch/common/network/IfConfig.java +++ b/core/src/main/java/org/elasticsearch/common/network/IfConfig.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.network; -import org.elasticsearch.common.logging.ESLogger; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.Loggers; import java.io.IOException; @@ -36,7 +36,7 @@ import java.util.Locale; */ final class IfConfig { - private static final ESLogger logger = Loggers.getLogger(IfConfig.class); + private static final Logger logger = Loggers.getLogger(IfConfig.class); private static final String INDENT = " "; /** log interface configuration at debug level, if its enabled */ diff --git a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java index 152a5629dd8..787fa950bea 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java @@ -19,8 +19,9 @@ package org.elasticsearch.common.settings; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.search.spell.LevensteinDistance; -import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.collect.Tuple; @@ -35,7 +36,6 @@ import java.util.List; import java.util.Map; import java.util.Set; import java.util.SortedMap; -import java.util.SortedSet; import java.util.TreeMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.function.BiConsumer; @@ -129,7 +129,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent { settingUpdater.getValue(current, previous); } catch (RuntimeException ex) { exceptions.add(ex); - logger.debug("failed to prepareCommit settings for [{}]", ex, settingUpdater); + logger.debug((Supplier) () -> new ParameterizedMessage("failed to prepareCommit settings for [{}]", settingUpdater), ex); } } // here we are exhaustive and record all settings that failed. 
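The recurring rewrite in this stretch of the diff (Lucene.parseVersion above, AbstractScopedSettings next, and the discovery classes further down) replaces ESLogger's warn(msg, cause, params) with Log4j 2's supplier form. The cast selects the warn(Supplier, Throwable) overload, the ParameterizedMessage is only constructed if WARN is actually enabled, and the exception travels as an explicit Throwable instead of riding in the varargs. The pattern in isolation:

```java
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

final class LazyLoggingSketch {
    private static final Logger logger = LogManager.getLogger(LazyLoggingSketch.class);

    static void report(String setting, Exception e) {
        // Old style (ESLogger): logger.warn("failed to apply [{}]", e, setting);
        // New style: message construction is deferred until WARN is enabled,
        // and the cause is an explicit Throwable parameter.
        logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to apply [{}]", setting), e);
    }
}
```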
@@ -157,7 +157,8 @@ public abstract class AbstractScopedSettings extends AbstractComponent { try { applyRunnables.add(settingUpdater.updater(current, previous)); } catch (Exception ex) { - logger.warn("failed to prepareCommit settings for [{}]", ex, settingUpdater); + logger.warn( + (Supplier) () -> new ParameterizedMessage("failed to prepareCommit settings for [{}]", settingUpdater), ex); throw ex; } } @@ -521,7 +522,9 @@ public abstract class AbstractScopedSettings extends AbstractComponent { } } catch (IllegalArgumentException ex) { changed = true; - logger.warn("found invalid setting: {} value: {} - archiving",ex , entry.getKey(), entry.getValue()); + logger.warn( + (Supplier) () -> new ParameterizedMessage( + "found invalid setting: {} value: {} - archiving", entry.getKey(), entry.getValue()), ex); /* * We put them back in here such that tools can check from the outside if there are any indices with broken settings. The * setting can remain there but we want users to be aware that some of their setting are broken and they can research why diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 094530dac67..1ce156b8536 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -45,6 +45,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAl import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Setting.Property; @@ -132,7 +133,7 @@ public final class ClusterSettings extends AbstractScopedSettings { if (ESLoggerFactory.LOG_LEVEL_SETTING.getConcreteSetting(key).exists(settings) == false) { builder.putNull(key); } else { - builder.put(key, ESLoggerFactory.LOG_LEVEL_SETTING.getConcreteSetting(key).get(settings).name()); + builder.put(key, ESLoggerFactory.LOG_LEVEL_SETTING.getConcreteSetting(key).get(settings)); } } } @@ -144,12 +145,18 @@ public final class ClusterSettings extends AbstractScopedSettings { for (String key : value.getAsMap().keySet()) { assert loggerPredicate.test(key); String component = key.substring("logger.".length()); + if ("level".equals(component)) { + continue; + } if ("_root".equals(component)) { final String rootLevel = value.get(key); - ESLoggerFactory.getRootLogger().setLevel(rootLevel == null ? 
ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.get(settings) - .name() : rootLevel); + if (rootLevel == null) { + Loggers.setLevel(ESLoggerFactory.getRootLogger(), ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.get(settings)); + } else { + Loggers.setLevel(ESLoggerFactory.getRootLogger(), rootLevel); + } } else { - ESLoggerFactory.getLogger(component).setLevel(value.get(key)); + Loggers.setLevel(ESLoggerFactory.getLogger(component), value.get(key)); } } } diff --git a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index 370a6c07668..fc0d5f4df9f 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -140,6 +140,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { PrimaryShardAllocator.INDEX_RECOVERY_INITIAL_SHARDS_SETTING, FsDirectoryService.INDEX_LOCK_FACTOR_SETTING, EngineConfig.INDEX_CODEC_SETTING, + EngineConfig.INDEX_OPTIMIZE_AUTO_GENERATED_IDS, IndexMetaData.SETTING_WAIT_FOR_ACTIVE_SHARDS, // validate that built-in similarities don't get redefined Setting.groupSetting("index.similarity.", (s) -> { diff --git a/core/src/main/java/org/elasticsearch/common/settings/Setting.java b/core/src/main/java/org/elasticsearch/common/settings/Setting.java index 1d67008ab0b..12f4805ba2f 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.common.settings; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.support.ToXContentToBytes; @@ -26,7 +27,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.unit.ByteSizeValue; @@ -374,7 +374,7 @@ public class Setting extends ToXContentToBytes { /** * Build a new updater with a noop validator. */ - final AbstractScopedSettings.SettingUpdater newUpdater(Consumer consumer, ESLogger logger) { + final AbstractScopedSettings.SettingUpdater newUpdater(Consumer consumer, Logger logger) { return newUpdater(consumer, logger, (s) -> {}); } @@ -382,7 +382,7 @@ public class Setting extends ToXContentToBytes { * Build the updater responsible for validating new values, logging the new * value, and eventually setting the value where it belongs. */ - AbstractScopedSettings.SettingUpdater newUpdater(Consumer consumer, ESLogger logger, Consumer validator) { + AbstractScopedSettings.SettingUpdater newUpdater(Consumer consumer, Logger logger, Consumer validator) { if (isDynamic()) { return new Updater(consumer, logger, validator); } else { @@ -395,7 +395,7 @@ public class Setting extends ToXContentToBytes { * and its usage for details. 
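In the ClusterSettings consumer above, keys under the logger. prefix now come in three flavors: logger.level is the default-level setting and is deliberately skipped (without that guard the consumer would try to adjust a logger literally named "level"), logger._root addresses the root logger, and anything else names a concrete logger. The same dispatch spelled out straight-line, assuming only what the hunk shows:

```java
// key comes from the dynamic settings update, e.g. "logger.level",
// "logger._root", or "logger.org.elasticsearch.discovery".
static void applyLoggerSetting(String key, Settings value, Settings settings) {
    String component = key.substring("logger.".length());
    if ("level".equals(component)) {
        return; // "logger.level" is the default-level setting, not a logger name
    }
    if ("_root".equals(component)) {
        String rootLevel = value.get(key);
        if (rootLevel == null) {
            Loggers.setLevel(ESLoggerFactory.getRootLogger(), ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.get(settings));
        } else {
            Loggers.setLevel(ESLoggerFactory.getRootLogger(), rootLevel);
        }
    } else {
        Loggers.setLevel(ESLoggerFactory.getLogger(component), value.get(key));
    }
}
```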
*/ static AbstractScopedSettings.SettingUpdater> compoundUpdater(final BiConsumer consumer, - final Setting aSetting, final Setting bSetting, ESLogger logger) { + final Setting aSetting, final Setting bSetting, Logger logger) { final AbstractScopedSettings.SettingUpdater aSettingUpdater = aSetting.newUpdater(null, logger); final AbstractScopedSettings.SettingUpdater bSettingUpdater = bSetting.newUpdater(null, logger); return new AbstractScopedSettings.SettingUpdater>() { @@ -424,10 +424,10 @@ public class Setting extends ToXContentToBytes { private final class Updater implements AbstractScopedSettings.SettingUpdater { private final Consumer consumer; - private final ESLogger logger; + private final Logger logger; private final Consumer accept; - public Updater(Consumer consumer, ESLogger logger, Consumer accept) { + public Updater(Consumer consumer, Logger logger, Consumer accept) { this.consumer = consumer; this.logger = logger; this.accept = accept; @@ -707,7 +707,7 @@ public class Setting extends ToXContentToBytes { } @Override - public AbstractScopedSettings.SettingUpdater newUpdater(Consumer consumer, ESLogger logger, + public AbstractScopedSettings.SettingUpdater newUpdater(Consumer consumer, Logger logger, Consumer validator) { if (isDynamic() == false) { throw new IllegalStateException("setting [" + getKey() + "] is not dynamic"); @@ -831,7 +831,7 @@ public class Setting extends ToXContentToBytes { } @Override - AbstractScopedSettings.SettingUpdater newUpdater(Consumer consumer, ESLogger logger, Consumer validator) { + AbstractScopedSettings.SettingUpdater newUpdater(Consumer consumer, Logger logger, Consumer validator) { throw new UnsupportedOperationException("Affix settings can't be updated. Use #getConcreteSetting for updating."); } diff --git a/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java b/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java index 5fd19c4fc1f..60276ce14f7 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java +++ b/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java @@ -19,9 +19,9 @@ package org.elasticsearch.common.settings; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.inject.Binder; import org.elasticsearch.common.inject.Module; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -51,7 +51,7 @@ public class SettingsModule implements Module { private final Map> indexSettings = new HashMap<>(); private static final Predicate TRIBE_CLIENT_NODE_SETTINGS_PREDICATE = (s) -> s.startsWith("tribe.") && TribeService.TRIBE_SETTING_KEYS.contains(s) == false; - private final ESLogger logger; + private final Logger logger; private final IndexScopedSettings indexScopedSettings; private final ClusterSettings clusterSettings; diff --git a/core/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java b/core/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java index 32df65850a8..5944a8e06e6 100644 --- a/core/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java +++ b/core/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java @@ -23,20 +23,25 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import 
org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import java.io.IOException; import java.util.Locale; import java.util.Objects; -public class ByteSizeValue implements Streamable { +public class ByteSizeValue implements Writeable { - private long size; + private final long size; + private final ByteSizeUnit sizeUnit; - private ByteSizeUnit sizeUnit; - - private ByteSizeValue() { + public ByteSizeValue(StreamInput in) throws IOException { + size = in.readVLong(); + sizeUnit = ByteSizeUnit.BYTES; + } + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(bytes()); } public ByteSizeValue(long bytes) { @@ -172,7 +177,8 @@ public class ByteSizeValue implements Streamable { return parseBytesSizeValue(sValue, null, settingName); } - public static ByteSizeValue parseBytesSizeValue(String sValue, ByteSizeValue defaultValue, String settingName) throws ElasticsearchParseException { + public static ByteSizeValue parseBytesSizeValue(String sValue, ByteSizeValue defaultValue, String settingName) + throws ElasticsearchParseException { settingName = Objects.requireNonNull(settingName); if (sValue == null) { return defaultValue; @@ -210,7 +216,9 @@ public class ByteSizeValue implements Streamable { bytes = 0; } else { // Missing units: - throw new ElasticsearchParseException("failed to parse setting [{}] with value [{}] as a size in bytes: unit is missing or unrecognized", settingName, sValue); + throw new ElasticsearchParseException( + "failed to parse setting [{}] with value [{}] as a size in bytes: unit is missing or unrecognized", + settingName, sValue); } } catch (NumberFormatException e) { throw new ElasticsearchParseException("failed to parse [{}]", e, sValue); @@ -218,23 +226,6 @@ public class ByteSizeValue implements Streamable { return new ByteSizeValue(bytes, ByteSizeUnit.BYTES); } - public static ByteSizeValue readBytesSizeValue(StreamInput in) throws IOException { - ByteSizeValue sizeValue = new ByteSizeValue(); - sizeValue.readFrom(in); - return sizeValue; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - size = in.readVLong(); - sizeUnit = ByteSizeUnit.BYTES; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVLong(bytes()); - } - @Override public boolean equals(Object o) { if (this == o) { diff --git a/core/src/main/java/org/elasticsearch/common/unit/SizeValue.java b/core/src/main/java/org/elasticsearch/common/unit/SizeValue.java index e04dfe51430..cba51f29eeb 100644 --- a/core/src/main/java/org/elasticsearch/common/unit/SizeValue.java +++ b/core/src/main/java/org/elasticsearch/common/unit/SizeValue.java @@ -23,22 +23,14 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import java.io.IOException; -/** - * - */ -public class SizeValue implements Streamable { +public class SizeValue implements Writeable { - private long size; - - private SizeUnit sizeUnit; - - private SizeValue() { - - } + private final long size; + private final SizeUnit sizeUnit; public SizeValue(long singles) { this(singles, SizeUnit.SINGLE); @@ -52,6 +44,16 @@ public class SizeValue implements Streamable { this.sizeUnit = sizeUnit; } + public SizeValue(StreamInput in) throws IOException { + size = 
in.readVLong(); + sizeUnit = SizeUnit.SINGLE; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(singles()); + } + public long singles() { return sizeUnit.toSingles(size); } @@ -194,23 +196,6 @@ public class SizeValue implements Streamable { return new SizeValue(singles, SizeUnit.SINGLE); } - public static SizeValue readSizeValue(StreamInput in) throws IOException { - SizeValue sizeValue = new SizeValue(); - sizeValue.readFrom(in); - return sizeValue; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - size = in.readVLong(); - sizeUnit = SizeUnit.SINGLE; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVLong(singles()); - } - @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/core/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java b/core/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java index 221dc234511..7f550bc1c26 100644 --- a/core/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java +++ b/core/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java @@ -19,9 +19,9 @@ package org.elasticsearch.common.util; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.IOUtils; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.NodeEnvironment; @@ -41,7 +41,7 @@ import java.nio.file.StandardCopyOption; public class IndexFolderUpgrader { private final NodeEnvironment nodeEnv; private final Settings settings; - private final ESLogger logger = Loggers.getLogger(IndexFolderUpgrader.class); + private final Logger logger = Loggers.getLogger(IndexFolderUpgrader.class); /** * Creates a new upgrader instance diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/AbstractLifecycleRunnable.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/AbstractLifecycleRunnable.java index 30d7e63ec85..e7a38f1eb64 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/AbstractLifecycleRunnable.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/AbstractLifecycleRunnable.java @@ -18,8 +18,8 @@ */ package org.elasticsearch.common.util.concurrent; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.component.Lifecycle; -import org.elasticsearch.common.logging.ESLogger; import java.util.Objects; @@ -36,7 +36,7 @@ public abstract class AbstractLifecycleRunnable extends AbstractRunnable { /** * The service's logger (note: this is passed in!). */ - private final ESLogger logger; + private final Logger logger; /** * {@link AbstractLifecycleRunnable} must be aware of the actual {@code lifecycle} to react properly. 
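Beyond logging, this stretch of the diff also migrates ByteSizeValue and SizeValue from Streamable to Writeable: the mutable no-arg constructor plus readFrom pair gives way to final fields, a StreamInput constructor, and writeTo, so deserialization becomes a plain constructor call on an immutable value. A before/after sketch of a call site (the wrapper class is an assumption for illustration):

```java
import java.io.IOException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.ByteSizeValue;

final class WriteableSketch {
    static ByteSizeValue roundTrip(StreamInput in, StreamOutput out) throws IOException {
        // Before (Streamable): ByteSizeValue.readBytesSizeValue(in), which
        // allocated a mutable instance and then called readFrom(in).
        // After (Writeable): deserialization is just a constructor call.
        ByteSizeValue size = new ByteSizeValue(in);
        size.writeTo(out);
        return size;
    }
}
```

A side effect, assumed from the Writeable interface rather than shown in this hunk: ByteSizeValue::new now fits a reader-style method reference where the old static readBytesSizeValue helper was previously required.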
@@ -45,7 +45,7 @@ public abstract class AbstractLifecycleRunnable extends AbstractRunnable { * @param logger The logger to use when logging * @throws NullPointerException if any parameter is {@code null} */ - public AbstractLifecycleRunnable(Lifecycle lifecycle, ESLogger logger) { + public AbstractLifecycleRunnable(Lifecycle lifecycle, Logger logger) { this.lifecycle = Objects.requireNonNull(lifecycle, "lifecycle must not be null"); this.logger = Objects.requireNonNull(logger, "logger must not be null"); } diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/AsyncIOProcessor.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/AsyncIOProcessor.java index d201cf94f93..ad68471041b 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/AsyncIOProcessor.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/AsyncIOProcessor.java @@ -18,8 +18,8 @@ */ package org.elasticsearch.common.util.concurrent; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.logging.ESLogger; import java.io.IOException; import java.util.ArrayList; @@ -37,11 +37,11 @@ import java.util.function.Consumer; * might be blocked until other items are processed */ public abstract class AsyncIOProcessor { - private final ESLogger logger; + private final Logger logger; private final ArrayBlockingQueue>> queue; private final Semaphore promiseSemaphore = new Semaphore(1); - protected AsyncIOProcessor(ESLogger logger, int queueSize) { + protected AsyncIOProcessor(Logger logger, int queueSize) { this.logger = logger; this.queue = new ArrayBlockingQueue<>(queueSize); } diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/LoggingRunnable.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/LoggingRunnable.java index 878645eddf7..825d18b7e63 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/LoggingRunnable.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/LoggingRunnable.java @@ -19,17 +19,16 @@ package org.elasticsearch.common.util.concurrent; -import org.elasticsearch.common.logging.ESLogger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; -/** - */ public class LoggingRunnable implements Runnable { private final Runnable runnable; + private final Logger logger; - private final ESLogger logger; - - public LoggingRunnable(ESLogger logger, Runnable runnable) { + public LoggingRunnable(Logger logger, Runnable runnable) { this.runnable = runnable; this.logger = logger; } @@ -39,7 +38,8 @@ public class LoggingRunnable implements Runnable { try { runnable.run(); } catch (Exception e) { - logger.warn("failed to execute [{}]", e, runnable.toString()); + logger.warn((Supplier) () -> new ParameterizedMessage("failed to execute [{}]", runnable.toString()), e); } } + } diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java b/core/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java index eaaa98167bd..9f313a59b90 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java @@ -300,16 +300,7 @@ public abstract class AbstractXContentParser implements XContentParser { } else if (token == XContentParser.Token.VALUE_STRING) { 
return parser.text(); } else if (token == XContentParser.Token.VALUE_NUMBER) { - XContentParser.NumberType numberType = parser.numberType(); - if (numberType == XContentParser.NumberType.INT) { - return parser.intValue(); - } else if (numberType == XContentParser.NumberType.LONG) { - return parser.longValue(); - } else if (numberType == XContentParser.NumberType.FLOAT) { - return parser.floatValue(); - } else if (numberType == XContentParser.NumberType.DOUBLE) { - return parser.doubleValue(); - } + return parser.numberValue(); } else if (token == XContentParser.Token.VALUE_BOOLEAN) { return parser.booleanValue(); } else if (token == XContentParser.Token.START_OBJECT) { diff --git a/core/src/main/java/org/elasticsearch/discovery/AckClusterStatePublishResponseHandler.java b/core/src/main/java/org/elasticsearch/discovery/AckClusterStatePublishResponseHandler.java index 26e78989591..b432d0538c9 100644 --- a/core/src/main/java/org/elasticsearch/discovery/AckClusterStatePublishResponseHandler.java +++ b/core/src/main/java/org/elasticsearch/discovery/AckClusterStatePublishResponseHandler.java @@ -18,8 +18,10 @@ */ package org.elasticsearch.discovery; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.ESLoggerFactory; import java.util.Set; @@ -31,7 +33,7 @@ import java.util.Set; */ public class AckClusterStatePublishResponseHandler extends BlockingClusterStatePublishResponseHandler { - private static final ESLogger logger = ESLoggerFactory.getLogger(AckClusterStatePublishResponseHandler.class.getName()); + private static final Logger logger = ESLoggerFactory.getLogger(AckClusterStatePublishResponseHandler.class.getName()); private final Discovery.AckListener ackListener; @@ -68,7 +70,7 @@ public class AckClusterStatePublishResponseHandler extends BlockingClusterStateP ackListener.onNodeAck(node, e); } catch (Exception inner) { inner.addSuppressed(e); - logger.debug("error while processing ack for node [{}]", inner, node); + logger.debug((Supplier) () -> new ParameterizedMessage("error while processing ack for node [{}]", node), inner); } } } diff --git a/core/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java index c544db4047f..2d260787bd0 100644 --- a/core/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java @@ -19,6 +19,8 @@ package org.elasticsearch.discovery.local; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -144,7 +146,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implements Discov @Override public void onFailure(String source, Exception e) { - logger.error("unexpected failure during [{}]", e, source); + logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); } }); } else if (firstMaster != null) { @@ -173,7 +175,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implements Discov @Override public void onFailure(String source, Exception e) { - logger.error("unexpected 
failure during [{}]", e, source); + logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); } }); @@ -238,7 +240,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implements Discov @Override public void onFailure(String source, Exception e) { - logger.error("unexpected failure during [{}]", e, source); + logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); } }); } @@ -329,7 +331,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implements Discov newNodeSpecificClusterState = discovery.lastProcessedClusterState.readDiffFrom(StreamInput.wrap(clusterStateDiffBytes)).apply(discovery.lastProcessedClusterState); logger.trace("sending diff cluster state version [{}] with size {} to [{}]", clusterState.version(), clusterStateDiffBytes.length, discovery.localNode().getName()); } catch (IncompatibleClusterStateVersionException ex) { - logger.warn("incompatible cluster state version [{}] - resending complete cluster state", ex, clusterState.version()); + logger.warn((Supplier) () -> new ParameterizedMessage("incompatible cluster state version [{}] - resending complete cluster state", clusterState.version()), ex); } } if (newNodeSpecificClusterState == null) { @@ -380,7 +382,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implements Discov @Override public void onFailure(String source, Exception e) { - logger.error("unexpected failure during [{}]", e, source); + logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); publishResponseHandler.onFailure(discovery.localNode(), e); } diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java b/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java index 1f4f57c4ed4..6f0b8966d09 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java @@ -18,6 +18,9 @@ */ package org.elasticsearch.discovery.zen; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -34,7 +37,6 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.unit.TimeValue; @@ -348,13 +350,13 @@ public class NodeJoinController extends AbstractComponent { static class JoinTaskListener implements ClusterStateTaskListener { final List callbacks; - private final ESLogger logger; + private final Logger logger; - JoinTaskListener(MembershipAction.JoinCallback callback, ESLogger logger) { + JoinTaskListener(MembershipAction.JoinCallback callback, Logger logger) { this(Collections.singletonList(callback), logger); } - JoinTaskListener(List callbacks, ESLogger logger) { + JoinTaskListener(List callbacks, Logger logger) { this.callbacks = callbacks; this.logger = logger; } @@ -365,7 +367,7 @@ public class NodeJoinController extends AbstractComponent { 
try { callback.onFailure(e); } catch (Exception inner) { - logger.error("error handling task failure [{}]", inner, e); + logger.error((Supplier) () -> new ParameterizedMessage("error handling task failure [{}]", e), inner); } } } @@ -376,7 +378,7 @@ public class NodeJoinController extends AbstractComponent { try { callback.onSuccess(); } catch (Exception e) { - logger.error("unexpected error during [{}]", e, source); + logger.error((Supplier) () -> new ParameterizedMessage("unexpected error during [{}]", source), e); } } } diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index d380b1fd601..c4fc4f15f40 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -19,6 +19,9 @@ package org.elasticsearch.discovery.zen; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; @@ -45,7 +48,6 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.internal.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -259,7 +261,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover try { membership.sendLeaveRequestBlocking(nodes.getMasterNode(), nodes.getLocalNode(), TimeValue.timeValueSeconds(1)); } catch (Exception e) { - logger.debug("failed to send leave request to master [{}]", e, nodes.getMasterNode()); + logger.debug((Supplier) () -> new ParameterizedMessage("failed to send leave request to master [{}]", nodes.getMasterNode()), e); } } else { // we're master -> let other potential master we left and start a master election now rather then wait for masterFD @@ -271,7 +273,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover try { membership.sendLeaveRequest(nodes.getLocalNode(), possibleMaster); } catch (Exception e) { - logger.debug("failed to send leave request from master [{}] to possible master [{}]", e, nodes.getMasterNode(), possibleMaster); + logger.debug((Supplier) () -> new ParameterizedMessage("failed to send leave request from master [{}] to possible master [{}]", nodes.getMasterNode(), possibleMaster), e); } } } @@ -330,7 +332,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover @Override public void onFailure(String source, Exception e) { - logger.error("unexpected failure during [{}]", e, source); + logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); } }); @@ -467,7 +469,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover // first, make sure we can connect to the master transportService.connectToNode(masterNode); } catch (Exception e) { - logger.warn("failed to connect to master [{}], retrying...", e, masterNode); + logger.warn((Supplier) () -> new ParameterizedMessage("failed to connect to master [{}], retrying...", masterNode), e); return false; } int joinAttempt = 0; // 
we retry on illegal state if the master is not yet ready @@ -487,7 +489,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover } } else { if (logger.isTraceEnabled()) { - logger.trace("failed to send join request to master [{}]", e, masterNode); + logger.trace((Supplier) () -> new ParameterizedMessage("failed to send join request to master [{}]", masterNode), e); } else { logger.info("failed to send join request to master [{}], reason [{}]", masterNode, ExceptionsHelper.detailedMessage(e)); } @@ -509,7 +511,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover private final AllocationService allocationService; private final ElectMasterService electMasterService; private final BiFunction rejoin; - private final ESLogger logger; + private final Logger logger; static class Task { @@ -539,7 +541,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover final AllocationService allocationService, final ElectMasterService electMasterService, final BiFunction rejoin, - final ESLogger logger) { + final Logger logger) { this.allocationService = allocationService; this.electMasterService = electMasterService; this.rejoin = rejoin; @@ -585,7 +587,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover @Override public void onFailure(final String source, final Exception e) { - logger.error("unexpected failure during [{}]", e, source); + logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); } @Override @@ -657,7 +659,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover @Override public void onFailure(String source, Exception e) { - logger.error("unexpected failure during [{}]", e, source); + logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); } @Override @@ -677,7 +679,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover return; } - logger.info("master_left [{}], reason [{}]", cause, masterNode, reason); + logger.info((Supplier) () -> new ParameterizedMessage("master_left [{}], reason [{}]", masterNode, reason), cause); clusterService.submitStateUpdateTask("master_failed (" + masterNode + ")", new ClusterStateUpdateTask(Priority.IMMEDIATE) { @@ -706,7 +708,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover @Override public void onFailure(String source, Exception e) { - logger.error("unexpected failure during [{}]", e, source); + logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); } @Override @@ -791,13 +793,13 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover @Override public void onFailure(String source, Exception e) { - logger.error("unexpected failure during [{}]", e, source); + logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); if (newClusterState != null) { try { publishClusterState.pendingStatesQueue().markAsFailed(newClusterState, e); } catch (Exception inner) { inner.addSuppressed(e); - logger.error("unexpected exception while failing [{}]", inner, source); + logger.error((Supplier) () -> new ParameterizedMessage("unexpected exception while failing [{}]", source), inner); } } } @@ -821,7 +823,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover * If the first condition fails we reject the cluster state and throw an 
error. * If the second condition fails we ignore the cluster state. */ - public static boolean shouldIgnoreOrRejectNewClusterState(ESLogger logger, ClusterState currentState, ClusterState newClusterState) { + public static boolean shouldIgnoreOrRejectNewClusterState(Logger logger, ClusterState currentState, ClusterState newClusterState) { validateStateIsFromCurrentMaster(logger, currentState.nodes(), newClusterState); // reject cluster states that are not new from the same master @@ -845,7 +847,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover * This method checks for this and throws an exception if needed */ - public static void validateStateIsFromCurrentMaster(ESLogger logger, DiscoveryNodes currentNodes, ClusterState newClusterState) { + public static void validateStateIsFromCurrentMaster(Logger logger, DiscoveryNodes currentNodes, ClusterState newClusterState) { if (currentNodes.getMasterNodeId() == null) { return; } @@ -880,7 +882,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover try { membership.sendValidateJoinRequestBlocking(node, state, joinTimeout); } catch (Exception e) { - logger.warn("failed to validate incoming join request from node [{}]", e, node); + logger.warn((Supplier) () -> new ParameterizedMessage("failed to validate incoming join request from node [{}]", node), e); callback.onFailure(new IllegalStateException("failure when sending a validation request to node", e)); return; } @@ -964,7 +966,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover } } - static List filterPingResponses(ZenPing.PingResponse[] fullPingResponses, boolean masterElectionIgnoreNonMasters, ESLogger logger) { + static List filterPingResponses(ZenPing.PingResponse[] fullPingResponses, boolean masterElectionIgnoreNonMasters, Logger logger) { List pingResponses; if (masterElectionIgnoreNonMasters) { pingResponses = Arrays.stream(fullPingResponses).filter(ping -> ping.node().isMasterNode()).collect(Collectors.toList()); @@ -1034,11 +1036,11 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover @Override public void handleException(TransportException exp) { - logger.warn("failed to send rejoin request to [{}]", exp, otherMaster); + logger.warn((Supplier) () -> new ParameterizedMessage("failed to send rejoin request to [{}]", otherMaster), exp); } }); } catch (Exception e) { - logger.warn("failed to send rejoin request to [{}]", e, otherMaster); + logger.warn((Supplier) () -> new ParameterizedMessage("failed to send rejoin request to [{}]", otherMaster), e); } return localClusterState; } @@ -1157,7 +1159,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover @Override public void onFailure(String source, Exception e) { - logger.error("unexpected failure during [{}]", e, source); + logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); } }); } diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java b/core/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java index bca13211c6c..6dc89998046 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java @@ -19,6 +19,8 @@ package org.elasticsearch.discovery.zen.fd; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; 
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -34,7 +36,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportException; @@ -42,6 +43,7 @@ import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; import java.io.IOException; @@ -283,8 +285,13 @@ public class MasterFaultDetection extends FaultDetection { } int retryCount = ++MasterFaultDetection.this.retryCount; - logger.trace("[master] failed to ping [{}], retry [{}] out of [{}]", exp, masterNode, retryCount, - pingRetryCount); + logger.trace( + (Supplier) () -> new ParameterizedMessage( + "[master] failed to ping [{}], retry [{}] out of [{}]", + masterNode, + retryCount, + pingRetryCount), + exp); if (retryCount >= pingRetryCount) { logger.debug("[master] failed to ping [{}], tried [{}] times, each with maximum [{}] timeout", masterNode, pingRetryCount, pingRetryTimeout); diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/fd/NodesFaultDetection.java b/core/src/main/java/org/elasticsearch/discovery/zen/fd/NodesFaultDetection.java index 4f17b14ff6b..40eb36cec1f 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/fd/NodesFaultDetection.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/fd/NodesFaultDetection.java @@ -19,6 +19,8 @@ package org.elasticsearch.discovery.zen.fd; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -28,7 +30,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportException; @@ -36,6 +37,7 @@ import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; import java.io.IOException; @@ -166,7 +168,12 @@ public class NodesFaultDetection extends FaultDetection { } }); } catch (EsRejectedExecutionException ex) { - logger.trace("[node ] [{}] ignoring node failure (reason [{}]). Local node is shutting down", ex, node, reason); + logger.trace( + (Supplier) () -> new ParameterizedMessage( + "[node ] [{}] ignoring node failure (reason [{}]). 
Local node is shutting down", + node, + reason), + ex); } } @@ -231,7 +238,13 @@ public class NodesFaultDetection extends FaultDetection { } retryCount++; - logger.trace("[node ] failed to ping [{}], retry [{}] out of [{}]", exp, node, retryCount, pingRetryCount); + logger.trace( + (Supplier) () -> new ParameterizedMessage( + "[node ] failed to ping [{}], retry [{}] out of [{}]", + node, + retryCount, + pingRetryCount), + exp); if (retryCount >= pingRetryCount) { logger.debug("[node ] failed to ping [{}], tried [{}] times, each with maximum [{}] timeout", node, pingRetryCount, pingRetryTimeout); diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java b/core/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java index e1466651da7..176ac5763e3 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java @@ -20,6 +20,8 @@ package org.elasticsearch.discovery.zen.ping.unicast; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; @@ -46,7 +48,6 @@ import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.discovery.zen.ping.PingContextProvider; import org.elasticsearch.discovery.zen.ping.ZenPing; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.RemoteTransportException; import org.elasticsearch.transport.TransportChannel; @@ -55,6 +56,7 @@ import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; import java.io.Closeable; @@ -413,13 +415,18 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPin success = true; } catch (ConnectTransportException e) { // can't connect to the node - this is a more common path! - logger.trace("[{}] failed to connect to {}", e, sendPingsHandler.id(), finalNodeToSend); + logger.trace( + (Supplier) () -> new ParameterizedMessage( + "[{}] failed to connect to {}", sendPingsHandler.id(), finalNodeToSend), e); } catch (RemoteTransportException e) { // something went wrong on the other side - logger.debug("[{}] received a remote error as a response to ping {}", e, - sendPingsHandler.id(), finalNodeToSend); + logger.debug( + (Supplier) () -> new ParameterizedMessage( + "[{}] received a remote error as a response to ping {}", sendPingsHandler.id(), finalNodeToSend), e); } catch (Exception e) { - logger.warn("[{}] failed send ping to {}", e, sendPingsHandler.id(), finalNodeToSend); + logger.warn( + (Supplier) () -> new ParameterizedMessage( + "[{}] failed to send ping to {}", sendPingsHandler.id(), finalNodeToSend), e); } finally { if (!success) { latch.countDown(); @@ -486,9 +493,9 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPin latch.countDown(); if (exp instanceof ConnectTransportException) { // ok, not connected...
- logger.trace("failed to connect to {}", exp, nodeToSend); + logger.trace((Supplier) () -> new ParameterizedMessage("failed to connect to {}", nodeToSend), exp); } else { - logger.warn("failed to send ping to [{}]", exp, node); + logger.warn((Supplier) () -> new ParameterizedMessage("failed to send ping to [{}]", node), exp); } } }); diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/publish/PendingClusterStatesQueue.java b/core/src/main/java/org/elasticsearch/discovery/zen/publish/PendingClusterStatesQueue.java index 24b093627b6..01fb96b7133 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/publish/PendingClusterStatesQueue.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/publish/PendingClusterStatesQueue.java @@ -18,10 +18,10 @@ */ package org.elasticsearch.discovery.zen.publish; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.logging.ESLogger; import java.util.ArrayList; import java.util.Locale; @@ -55,10 +55,10 @@ public class PendingClusterStatesQueue { } final ArrayList pendingStates = new ArrayList<>(); - final ESLogger logger; + final Logger logger; final int maxQueueSize; - public PendingClusterStatesQueue(ESLogger logger, int maxQueueSize) { + public PendingClusterStatesQueue(Logger logger, int maxQueueSize) { this.logger = logger; this.maxQueueSize = maxQueueSize; } diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java b/core/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java index 10f874923da..06c25ebf81a 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.discovery.zen.publish; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -244,7 +245,8 @@ public class PublishClusterStateAction extends AbstractComponent { bytes = serializeFullClusterState(clusterState, node.getVersion()); serializedStates.put(node.getVersion(), bytes); } catch (Exception e) { - logger.warn("failed to serialize cluster_state before publishing it to node {}", e, node); + logger.warn( + (org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage("failed to serialize cluster_state before publishing it to node {}", node), e); sendingController.onNodeSendFailed(node, e); return; } @@ -290,13 +292,14 @@ public class PublishClusterStateAction extends AbstractComponent { logger.debug("resending full cluster state to node {} reason {}", node, exp.getDetailedMessage()); sendFullClusterState(clusterState, serializedStates, node, publishTimeout, sendingController); } else { - logger.debug("failed to send cluster state to {}", exp, node); + logger.debug((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage("failed to send cluster state to {}", node), exp); sendingController.onNodeSendFailed(node, exp); } } }); } catch (Exception e) { - logger.warn("error sending cluster state to {}", e, node); + logger.warn( + (org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage("error sending cluster state to {}", node), e); 
sendingController.onNodeSendFailed(node, e); } } @@ -322,12 +325,12 @@ public class PublishClusterStateAction extends AbstractComponent { @Override public void handleException(TransportException exp) { - logger.debug("failed to commit cluster state (uuid [{}], version [{}]) to {}", exp, clusterState.stateUUID(), clusterState.version(), node); + logger.debug((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage("failed to commit cluster state (uuid [{}], version [{}]) to {}", clusterState.stateUUID(), clusterState.version(), node), exp); sendingController.getPublishResponseHandler().onFailure(node, exp); } }); } catch (Exception t) { - logger.warn("error sending cluster state commit (uuid [{}], version [{}]) to {}", t, clusterState.stateUUID(), clusterState.version(), node); + logger.warn((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage("error sending cluster state commit (uuid [{}], version [{}]) to {}", clusterState.stateUUID(), clusterState.version(), node), t); sendingController.getPublishResponseHandler().onFailure(node, t); } } @@ -626,7 +629,7 @@ public class PublishClusterStateAction extends AbstractComponent { if (committedOrFailed()) { return committed == false; } - logger.trace("failed to commit version [{}]. {}", reason, clusterState.version(), details); + logger.trace((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage("failed to commit version [{}]. {}", clusterState.version(), details), reason); committed = false; committedOrFailedLatch.countDown(); return true; diff --git a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java index 4ddf0e38b75..df9514cdf88 100644 --- a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -19,6 +19,9 @@ package org.elasticsearch.env; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.store.Directory; @@ -36,7 +39,6 @@ import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -83,7 +85,7 @@ import static java.util.Collections.unmodifiableSet; */ public final class NodeEnvironment implements Closeable { - private final ESLogger logger; + private final Logger logger; public static class NodePath { /* ${data.paths}/nodes/{node.id} */ @@ -196,7 +198,7 @@ public final class NodeEnvironment implements Closeable { boolean success = false; // trace logger to debug issues before the default node name is derived from the node id - ESLogger startupTraceLogger = Loggers.getLogger(getClass(), settings); + Logger startupTraceLogger = Loggers.getLogger(getClass(), settings); try { sharedDataPath = environment.sharedDataFile(); @@ -231,7 +233,8 @@ public final class NodeEnvironment implements Closeable { } } catch (IOException e) { - startupTraceLogger.trace("failed to obtain node lock on {}", e, dir.toAbsolutePath()); + startupTraceLogger.trace( + (Supplier) () -> new 
ParameterizedMessage("failed to obtain node lock on {}", dir.toAbsolutePath()), e); lastException = new IOException("failed to obtain lock on " + dir.toAbsolutePath(), e); // release all the ones that were obtained up until now releaseAndNullLocks(locks); @@ -392,7 +395,7 @@ public final class NodeEnvironment implements Closeable { * scans the node paths and loads existing metaData file. If not found a new meta data will be generated * and persisted into the nodePaths */ - private static NodeMetaData loadOrCreateNodeMetaData(Settings settings, ESLogger logger, + private static NodeMetaData loadOrCreateNodeMetaData(Settings settings, Logger logger, NodePath... nodePaths) throws IOException { final Path[] paths = Arrays.stream(nodePaths).map(np -> np.path).toArray(Path[]::new); NodeMetaData metaData = NodeMetaData.FORMAT.loadLatestState(logger, paths); @@ -884,7 +887,7 @@ public final class NodeEnvironment implements Closeable { logger.trace("releasing lock [{}]", lock); lock.close(); } catch (IOException e) { - logger.trace("failed to release lock [{}]", e, lock); + logger.trace((Supplier) () -> new ParameterizedMessage("failed to release lock [{}]", lock), e); } } } diff --git a/core/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java b/core/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java index dc7194b949c..42c40034b10 100644 --- a/core/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java +++ b/core/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java @@ -19,6 +19,9 @@ package org.elasticsearch.gateway; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; @@ -30,7 +33,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.transport.ReceiveTimeoutTransportException; @@ -63,7 +65,7 @@ public abstract class AsyncShardFetch implements Rel void list(ShardId shardId, DiscoveryNode[] nodes, ActionListener listener); } - protected final ESLogger logger; + protected final Logger logger; protected final String type; private final ShardId shardId; private final Lister, T> action; @@ -72,7 +74,7 @@ public abstract class AsyncShardFetch implements Rel private boolean closed; @SuppressWarnings("unchecked") - protected AsyncShardFetch(ESLogger logger, String type, ShardId shardId, Lister, T> action) { + protected AsyncShardFetch(Logger logger, String type, ShardId shardId, Lister, T> action) { this.logger = logger; this.type = type; this.shardId = shardId; @@ -200,7 +202,7 @@ public abstract class AsyncShardFetch implements Rel if (unwrappedCause instanceof EsRejectedExecutionException || unwrappedCause instanceof ReceiveTimeoutTransportException || unwrappedCause instanceof ElasticsearchTimeoutException) { nodeEntry.restartFetching(); } else { - logger.warn("{}: failed to list shard for {} on node [{}]", failure, shardId, type, failure.nodeId()); + logger.warn((Supplier) () -> new ParameterizedMessage("{}: 
failed to list shard for {} on node [{}]", shardId, type, failure.nodeId()), failure); nodeEntry.doneFetching(failure.getCause()); } } diff --git a/core/src/main/java/org/elasticsearch/gateway/Gateway.java b/core/src/main/java/org/elasticsearch/gateway/Gateway.java index ee2abc17ab6..3030632a769 100644 --- a/core/src/main/java/org/elasticsearch/gateway/Gateway.java +++ b/core/src/main/java/org/elasticsearch/gateway/Gateway.java @@ -21,6 +21,7 @@ package org.elasticsearch.gateway; import com.carrotsearch.hppc.ObjectFloatHashMap; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; @@ -39,9 +40,6 @@ import org.elasticsearch.indices.IndicesService; import java.util.Arrays; import java.util.function.Supplier; -/** - * - */ public class Gateway extends AbstractComponent implements ClusterStateListener { private final ClusterService clusterService; @@ -138,7 +136,10 @@ public class Gateway extends AbstractComponent implements ClusterStateListener { indicesService.verifyIndexMetadata(nodeServicesProvider, electedIndexMetaData, electedIndexMetaData); } } catch (Exception e) { - logger.warn("recovering index {} failed - recovering as closed", e, electedIndexMetaData.getIndex()); + final Index electedIndex = electedIndexMetaData.getIndex(); + logger.warn( + (org.apache.logging.log4j.util.Supplier) + () -> new ParameterizedMessage("recovering index {} failed - recovering as closed", electedIndex), e); electedIndexMetaData = IndexMetaData.builder(electedIndexMetaData).state(IndexMetaData.State.CLOSE).build(); } diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java b/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java index a9fbe0ac82e..c84a9c3378a 100644 --- a/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java @@ -19,6 +19,7 @@ package org.elasticsearch.gateway; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.support.nodes.BaseNodeResponse; import org.elasticsearch.action.support.nodes.BaseNodesResponse; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -34,7 +35,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.index.shard.ShardId; @@ -140,7 +140,7 @@ public class GatewayAllocator extends AbstractComponent { class InternalAsyncFetch extends AsyncShardFetch { - public InternalAsyncFetch(ESLogger logger, String type, ShardId shardId, Lister, T> action) { + public InternalAsyncFetch(Logger logger, String type, ShardId shardId, Lister, T> action) { super(logger, type, shardId, action); } diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayService.java b/core/src/main/java/org/elasticsearch/gateway/GatewayService.java index 3282a8f2a4e..b953173c689 100644 --- a/core/src/main/java/org/elasticsearch/gateway/GatewayService.java +++ b/core/src/main/java/org/elasticsearch/gateway/GatewayService.java @@ -20,6 +20,8 @@ package org.elasticsearch.gateway; import 
com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; @@ -289,7 +291,7 @@ public class GatewayService extends AbstractLifecycleComponent implements Cluste @Override public void onFailure(String source, Exception e) { - logger.error("unexpected failure during [{}]", e, source); + logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); GatewayRecoveryListener.this.onFailure("failed to updated cluster state"); } diff --git a/core/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java b/core/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java index ee987b8665f..707cc89704f 100644 --- a/core/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java +++ b/core/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java @@ -19,6 +19,8 @@ package org.elasticsearch.gateway; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.block.ClusterBlocks; @@ -148,8 +150,7 @@ public class LocalAllocateDangledIndices extends AbstractComponent { upgradedIndexMetaData = metaDataIndexUpgradeService.upgradeIndexMetaData(indexMetaData); } catch (Exception ex) { // upgrade failed - adding index as closed - logger.warn("found dangled index [{}] on node [{}]. This index cannot be upgraded to the latest version, adding as closed", ex, - indexMetaData.getIndex(), request.fromNode); + logger.warn((Supplier) () -> new ParameterizedMessage("found dangled index [{}] on node [{}]. 
This index cannot be upgraded to the latest version, adding as closed", indexMetaData.getIndex(), request.fromNode), ex); upgradedIndexMetaData = IndexMetaData.builder(indexMetaData).state(IndexMetaData.State.CLOSE).version(indexMetaData.getVersion() + 1).build(); } metaData.put(upgradedIndexMetaData, false); @@ -176,7 +177,7 @@ public class LocalAllocateDangledIndices extends AbstractComponent { @Override public void onFailure(String source, Exception e) { - logger.error("unexpected failure during [{}]", e, source); + logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); try { channel.sendResponse(e); } catch (Exception inner) { diff --git a/core/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java b/core/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java index cc6a48b855b..71c3190e2ee 100644 --- a/core/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java +++ b/core/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java @@ -18,6 +18,9 @@ */ package org.elasticsearch.gateway; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexFormatTooNewException; @@ -30,7 +33,6 @@ import org.apache.lucene.store.SimpleFSDirectory; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.lucene.store.IndexOutputOutputStream; import org.elasticsearch.common.lucene.store.InputStreamIndexInput; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -254,11 +256,11 @@ public abstract class MetaDataStateFormat { * the states version from one or more data directories and if none of the latest states can be loaded an exception * is thrown to prevent accidentally loading a previous state and silently omitting the latest state. * - * @param logger an elasticsearch logger instance + * @param logger a logger instance * @param dataLocations the data-locations to try. * @return the latest state or null if no state was found. */ - public T loadLatestState(ESLogger logger, Path... dataLocations) throws IOException { + public T loadLatestState(Logger logger, Path... 
dataLocations) throws IOException { List files = new ArrayList<>(); long maxStateId = -1; boolean maxStateIdIsLegacy = true; @@ -322,7 +324,9 @@ public abstract class MetaDataStateFormat { return state; } catch (Exception e) { exceptions.add(new IOException("failed to read " + pathAndStateId.toString(), e)); - logger.debug("{}: failed to read [{}], ignoring...", e, pathAndStateId.file.toAbsolutePath(), prefix); + logger.debug( + (Supplier) () -> new ParameterizedMessage( + "{}: failed to read [{}], ignoring...", pathAndStateId.file.toAbsolutePath(), prefix), e); } } // if we reach this something went wrong diff --git a/core/src/main/java/org/elasticsearch/gateway/MetaStateService.java b/core/src/main/java/org/elasticsearch/gateway/MetaStateService.java index b5ec8466c2c..e58a48d41b4 100644 --- a/core/src/main/java/org/elasticsearch/gateway/MetaStateService.java +++ b/core/src/main/java/org/elasticsearch/gateway/MetaStateService.java @@ -19,11 +19,12 @@ package org.elasticsearch.gateway; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; @@ -127,7 +128,7 @@ public class MetaStateService extends AbstractComponent { IndexMetaData.FORMAT.write(indexMetaData, nodeEnv.indexPaths(indexMetaData.getIndex())); } catch (Exception ex) { - logger.warn("[{}]: failed to write index state", ex, index); + logger.warn((Supplier) () -> new ParameterizedMessage("[{}]: failed to write index state", index), ex); throw new IOException("failed to write state for [" + index + "]", ex); } } diff --git a/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java b/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java index 9c5debe25e0..a11300ac496 100644 --- a/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java @@ -19,6 +19,8 @@ package org.elasticsearch.gateway; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -226,7 +228,8 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { logger.trace("[{}] on node [{}] has no allocation id, out-dated shard (shard state version: [{}])", shard, nodeShardState.getNode(), nodeShardState.legacyVersion()); } } else { - logger.trace("[{}] on node [{}] has allocation id [{}] but the store can not be opened, treating as no allocation id", nodeShardState.storeException(), shard, nodeShardState.getNode(), allocationId); + final String finalAllocationId = allocationId; + logger.trace((Supplier) () -> new ParameterizedMessage("[{}] on node [{}] has allocation id [{}] but the store can not be opened, treating as no allocation id", shard, nodeShardState.getNode(), finalAllocationId), nodeShardState.storeException()); allocationId = null; } @@ -350,8 +353,9 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { logger.trace("[{}] on node [{}] has allocation id [{}]", shard, 
nodeShardState.getNode(), nodeShardState.allocationId()); } } else { + final long finalVersion = version; // when there is a store exception, we disregard the reported version and assign it as no version (same as shard does not exist) - logger.trace("[{}] on node [{}] has version [{}] but the store can not be opened, treating no version", nodeShardState.storeException(), shard, nodeShardState.getNode(), version); + logger.trace((Supplier) () -> new ParameterizedMessage("[{}] on node [{}] has version [{}] but the store can not be opened, treating as no version", shard, nodeShardState.getNode(), finalVersion), nodeShardState.storeException()); version = ShardStateMetaData.NO_VERSION; } diff --git a/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java b/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java index fc23ef13581..31fc290c10c 100644 --- a/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java +++ b/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java @@ -19,6 +19,8 @@ package org.elasticsearch.gateway; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; @@ -140,8 +142,14 @@ public class TransportNodesListGatewayStartedShards extends } Store.tryOpenIndex(shardPath.resolveIndex(), shardId, nodeEnv::shardLock, logger); } catch (Exception exception) { - logger.trace("{} can't open index for shard [{}] in path [{}]", exception, shardId, - shardStateMetaData, (shardPath != null) ? shardPath.resolveIndex() : ""); + final ShardPath finalShardPath = shardPath; + logger.trace( + (Supplier) () -> new ParameterizedMessage( + "{} can't open index for shard [{}] in path [{}]", + shardId, + shardStateMetaData, + (finalShardPath != null) ? finalShardPath.resolveIndex() : ""), + exception); String allocationId = shardStateMetaData.allocationId != null ?
shardStateMetaData.allocationId.getId() : null; return new NodeGatewayStartedShards(clusterService.localNode(), shardStateMetaData.legacyVersion, diff --git a/core/src/main/java/org/elasticsearch/http/HttpInfo.java b/core/src/main/java/org/elasticsearch/http/HttpInfo.java index 0f285974e8a..e8f3985a23a 100644 --- a/core/src/main/java/org/elasticsearch/http/HttpInfo.java +++ b/core/src/main/java/org/elasticsearch/http/HttpInfo.java @@ -21,7 +21,7 @@ package org.elasticsearch.http; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.ToXContent; @@ -29,15 +29,20 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; -/** - * - */ -public class HttpInfo implements Streamable, ToXContent { +public class HttpInfo implements Writeable, ToXContent { - private BoundTransportAddress address; - private long maxContentLength; + private final BoundTransportAddress address; + private final long maxContentLength; - HttpInfo() { + public HttpInfo(StreamInput in) throws IOException { + address = BoundTransportAddress.readBoundTransportAddress(in); + maxContentLength = in.readLong(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + address.writeTo(out); + out.writeLong(maxContentLength); } public HttpInfo(BoundTransportAddress address, long maxContentLength) { @@ -63,24 +68,6 @@ public class HttpInfo implements Streamable, ToXContent { return builder; } - public static HttpInfo readHttpInfo(StreamInput in) throws IOException { - HttpInfo info = new HttpInfo(); - info.readFrom(in); - return info; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - address = BoundTransportAddress.readBoundTransportAddress(in); - maxContentLength = in.readLong(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - address.writeTo(out); - out.writeLong(maxContentLength); - } - public BoundTransportAddress address() { return address; } diff --git a/core/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java b/core/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java index b155a436108..25acdd06b44 100644 --- a/core/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java +++ b/core/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java @@ -19,16 +19,13 @@ package org.elasticsearch.index; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; -/** - * - */ public abstract class AbstractIndexComponent implements IndexComponent { - protected final ESLogger logger; + protected final Logger logger; protected final DeprecationLogger deprecationLogger; protected final IndexSettings indexSettings; diff --git a/core/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java b/core/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java index 97e00b98df9..3b2cf5cbd07 100644 --- a/core/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java +++ b/core/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java @@ -19,9 +19,11 @@ package org.elasticsearch.index; 
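The HttpInfo hunk above shows the other migration running through this change set: Streamable, with its mutable no-arg constructor plus readFrom, is replaced by Writeable, where deserialization happens in a constructor that takes a StreamInput, so the fields can become final. A rough sketch of the idiom with a made-up two-field class (ExampleInfo is illustrative, not part of this patch):

    import java.io.IOException;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;
    import org.elasticsearch.common.io.stream.Writeable;

    public class ExampleInfo implements Writeable {
        private final String name;        // can be final now: the read path
        private final long sizeInBytes;   // is a constructor, not a setter

        public ExampleInfo(String name, long sizeInBytes) {
            this.name = name;
            this.sizeInBytes = sizeInBytes;
        }

        // Read side mirrors writeTo field for field, in the same order.
        public ExampleInfo(StreamInput in) throws IOException {
            name = in.readString();
            sizeInBytes = in.readLong();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeString(name);
            out.writeLong(sizeInBytes);
        }
    }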
+import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.IndexEventListener; @@ -40,7 +42,7 @@ import java.util.List; final class CompositeIndexEventListener implements IndexEventListener { private final List listeners; - private final ESLogger logger; + private final Logger logger; CompositeIndexEventListener(IndexSettings indexSettings, Collection listeners) { for (IndexEventListener listener : listeners) { @@ -58,7 +60,7 @@ final class CompositeIndexEventListener implements IndexEventListener { try { listener.shardRoutingChanged(indexShard, oldRouting, newRouting); } catch (Exception e) { - logger.warn("[{}] failed to invoke shard touring changed callback", e, indexShard.shardId().getId()); + logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to invoke shard routing changed callback", indexShard.shardId().getId()), e); } } } @@ -69,7 +71,7 @@ final class CompositeIndexEventListener implements IndexEventListener { try { listener.afterIndexShardCreated(indexShard); } catch (Exception e) { - logger.warn("[{}] failed to invoke after shard created callback", e, indexShard.shardId().getId()); + logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to invoke after shard created callback", indexShard.shardId().getId()), e); throw e; } } @@ -81,7 +83,7 @@ final class CompositeIndexEventListener implements IndexEventListener { try { listener.afterIndexShardStarted(indexShard); } catch (Exception e) { - logger.warn("[{}] failed to invoke after shard started callback", e, indexShard.shardId().getId()); + logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to invoke after shard started callback", indexShard.shardId().getId()), e); throw e; } } @@ -94,7 +96,7 @@ final class CompositeIndexEventListener implements IndexEventListener { try { listener.beforeIndexShardClosed(shardId, indexShard, indexSettings); } catch (Exception e) { - logger.warn("[{}] failed to invoke before shard closed callback", e, shardId.getId()); + logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to invoke before shard closed callback", shardId.getId()), e); throw e; } } @@ -107,7 +109,7 @@ final class CompositeIndexEventListener implements IndexEventListener { try { listener.afterIndexShardClosed(shardId, indexShard, indexSettings); } catch (Exception e) { - logger.warn("[{}] failed to invoke after shard closed callback", e, shardId.getId()); + logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to invoke after shard closed callback", shardId.getId()), e); throw e; } } @@ -119,7 +121,7 @@ final class CompositeIndexEventListener implements IndexEventListener { try { listener.onShardInactive(indexShard); } catch (Exception e) { - logger.warn("[{}] failed to invoke on shard inactive callback", e, indexShard.shardId().getId()); + logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to invoke on shard inactive callback", indexShard.shardId().getId()), e); throw e; } } @@ -131,7 +133,7 @@ final class CompositeIndexEventListener implements IndexEventListener { try { listener.indexShardStateChanged(indexShard, previousState, indexShard.state(), reason); } catch (Exception e)
{ - logger.warn("[{}] failed to invoke index shard state changed callback", e, indexShard.shardId().getId()); + logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to invoke index shard state changed callback", indexShard.shardId().getId()), e); throw e; } } @@ -167,7 +169,7 @@ final class CompositeIndexEventListener implements IndexEventListener { try { listener.beforeIndexShardCreated(shardId, indexSettings); } catch (Exception e) { - logger.warn("[{}] failed to invoke before shard created callback", e, shardId); + logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to invoke before shard created callback", shardId), e); throw e; } } @@ -228,7 +230,7 @@ final class CompositeIndexEventListener implements IndexEventListener { try { listener.beforeIndexShardDeleted(shardId, indexSettings); } catch (Exception e) { - logger.warn("[{}] failed to invoke before shard deleted callback", e, shardId.getId()); + logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to invoke before shard deleted callback", shardId.getId()), e); throw e; } } @@ -241,7 +243,7 @@ final class CompositeIndexEventListener implements IndexEventListener { try { listener.afterIndexShardDeleted(shardId, indexSettings); } catch (Exception e) { - logger.warn("[{}] failed to invoke after shard deleted callback", e, shardId.getId()); + logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to invoke after shard deleted callback", shardId.getId()), e); throw e; } } diff --git a/core/src/main/java/org/elasticsearch/index/IndexService.java b/core/src/main/java/org/elasticsearch/index/IndexService.java index b9f93bf2ac4..e662e46c79d 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexService.java +++ b/core/src/main/java/org/elasticsearch/index/IndexService.java @@ -19,6 +19,8 @@ package org.elasticsearch.index; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; @@ -92,9 +94,6 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder; -/** - * - */ public class IndexService extends AbstractIndexComponent implements IndicesClusterStateService.AllocatedIndex { private final IndexEventListener eventListener; @@ -397,7 +396,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust final boolean flushEngine = deleted.get() == false && closed.get(); indexShard.close(reason, flushEngine); } catch (Exception e) { - logger.debug("[{}] failed to close index shard", e, shardId); + logger.debug((Supplier) () -> new ParameterizedMessage("[{}] failed to close index shard", shardId), e); // ignore } } @@ -408,7 +407,9 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust try { store.close(); } catch (Exception e) { - logger.warn("[{}] failed to close store on shard removal (reason: [{}])", e, shardId, reason); + logger.warn( + (Supplier) () -> new ParameterizedMessage( + "[{}] failed to close store on shard removal (reason: [{}])", shardId, reason), e); } } } @@ -427,7 +428,9 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust } } catch (IOException e) { shardStoreDeleter.addPendingDelete(lock.getShardId(), indexSettings); - logger.debug("[{}] failed to delete shard content - scheduled a 
retry", e, lock.getShardId().id()); + logger.debug( + (Supplier) () -> new ParameterizedMessage( + "[{}] failed to delete shard content - scheduled a retry", lock.getShardId().id()), e); } } } @@ -639,7 +642,9 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust try { shard.onSettingsChanged(); } catch (Exception e) { - logger.warn("[{}] failed to notify shard about setting change", e, shard.shardId().id()); + logger.warn( + (Supplier) () -> new ParameterizedMessage( + "[{}] failed to notify shard about setting change", shard.shardId().id()), e); } } if (refreshTask.getInterval().equals(indexSettings.getRefreshInterval()) == false) { @@ -781,8 +786,11 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust } catch (Exception ex) { if (lastThrownException == null || sameException(lastThrownException, ex) == false) { // prevent the annoying fact of logging the same stuff all the time with an interval of 1 sec will spam all your logs - indexService.logger.warn("failed to run task {} - suppressing re-occurring exceptions unless the exception changes", - ex, toString()); + indexService.logger.warn( + (Supplier) () -> new ParameterizedMessage( + "failed to run task {} - suppressing re-occurring exceptions unless the exception changes", + toString()), + ex); lastThrownException = ex; } } finally { diff --git a/core/src/main/java/org/elasticsearch/index/IndexSettings.java b/core/src/main/java/org/elasticsearch/index/IndexSettings.java index dd0551aa5b6..5666fb416f0 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/core/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -18,11 +18,11 @@ */ package org.elasticsearch.index; +import org.apache.logging.log4j.Logger; import org.apache.lucene.index.MergePolicy; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.ParseFieldMatcher; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.IndexScopedSettings; @@ -130,7 +130,7 @@ public final class IndexSettings { private final Index index; private final Version version; - private final ESLogger logger; + private final Logger logger; private final String nodeName; private final Settings nodeSettings; private final int numberOfShards; diff --git a/core/src/main/java/org/elasticsearch/index/IndexWarmer.java b/core/src/main/java/org/elasticsearch/index/IndexWarmer.java index ba48adb71a8..439acb239a3 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexWarmer.java +++ b/core/src/main/java/org/elasticsearch/index/IndexWarmer.java @@ -19,6 +19,8 @@ package org.elasticsearch.index; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.DirectoryReader; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; @@ -143,11 +145,18 @@ public final class IndexWarmer extends AbstractComponent { } if (indexShard.warmerService().logger().isTraceEnabled()) { - indexShard.warmerService().logger().trace("warmed global ordinals for [{}], took [{}]", fieldType.name(), + indexShard.warmerService().logger().trace( + "warmed global ordinals for [{}], took [{}]", + fieldType.name(), TimeValue.timeValueNanos(System.nanoTime() - start)); } } catch (Exception e) { - 
indexShard.warmerService().logger().warn("failed to warm-up global ordinals for [{}]", e, fieldType.name()); + indexShard + .warmerService() + .logger() + .warn( + (Supplier) () -> new ParameterizedMessage( + "failed to warm-up global ordinals for [{}]", fieldType.name()), e); } finally { latch.countDown(); } diff --git a/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java b/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java index a145012dd2b..513e87878d6 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java +++ b/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java @@ -19,9 +19,9 @@ package org.elasticsearch.index; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -34,8 +34,6 @@ import org.elasticsearch.index.shard.IndexingOperationListener; import java.io.IOException; import java.util.concurrent.TimeUnit; -/** - */ public final class IndexingSlowLog implements IndexingOperationListener { private final Index index; private boolean reformat; @@ -52,7 +50,7 @@ public final class IndexingSlowLog implements IndexingOperationListener { private SlowLogLevel level; - private final ESLogger indexLogger; + private final Logger indexLogger; private static final String INDEX_INDEXING_SLOWLOG_PREFIX = "index.indexing.slowlog"; public static final Setting INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING = @@ -112,7 +110,7 @@ public final class IndexingSlowLog implements IndexingOperationListener { private void setLevel(SlowLogLevel level) { this.level = level; - this.indexLogger.setLevel(level.name()); + Loggers.setLevel(this.indexLogger, level.name()); } private void setWarnThreshold(TimeValue warnThreshold) { diff --git a/core/src/main/java/org/elasticsearch/index/MergePolicyConfig.java b/core/src/main/java/org/elasticsearch/index/MergePolicyConfig.java index b5856d606e4..52b98e2bd01 100644 --- a/core/src/main/java/org/elasticsearch/index/MergePolicyConfig.java +++ b/core/src/main/java/org/elasticsearch/index/MergePolicyConfig.java @@ -19,11 +19,10 @@ package org.elasticsearch.index; +import org.apache.logging.log4j.Logger; import org.apache.lucene.index.MergePolicy; import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.TieredMergePolicy; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.ByteSizeUnit; @@ -117,7 +116,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; public final class MergePolicyConfig { private final TieredMergePolicy mergePolicy = new TieredMergePolicy(); - private final ESLogger logger; + private final Logger logger; private final boolean mergesEnabled; public static final double DEFAULT_EXPUNGE_DELETES_ALLOWED = 10d; @@ -155,7 +154,7 @@ public final class MergePolicyConfig { public static final String INDEX_MERGE_ENABLED = "index.merge.enabled"; // don't convert to Setting<> and register... 
we only set this in tests and register via a plugin - MergePolicyConfig(ESLogger logger, IndexSettings indexSettings) { + MergePolicyConfig(Logger logger, IndexSettings indexSettings) { this.logger = logger; double forceMergeDeletesPctAllowed = indexSettings.getValue(INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING); // percentage ByteSizeValue floorSegment = indexSettings.getValue(INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING); diff --git a/core/src/main/java/org/elasticsearch/index/SearchSlowLog.java b/core/src/main/java/org/elasticsearch/index/SearchSlowLog.java index 148b676331d..19086416b80 100644 --- a/core/src/main/java/org/elasticsearch/index/SearchSlowLog.java +++ b/core/src/main/java/org/elasticsearch/index/SearchSlowLog.java @@ -19,8 +19,8 @@ package org.elasticsearch.index; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -30,8 +30,6 @@ import org.elasticsearch.search.internal.SearchContext; import java.util.concurrent.TimeUnit; -/** - */ public final class SearchSlowLog implements SearchOperationListener { private boolean reformat; @@ -47,8 +45,8 @@ public final class SearchSlowLog implements SearchOperationListener { private SlowLogLevel level; - private final ESLogger queryLogger; - private final ESLogger fetchLogger; + private final Logger queryLogger; + private final Logger fetchLogger; private static final String INDEX_SEARCH_SLOWLOG_PREFIX = "index.search.slowlog"; public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING = @@ -113,8 +111,8 @@ public final class SearchSlowLog implements SearchOperationListener { private void setLevel(SlowLogLevel level) { this.level = level; - this.queryLogger.setLevel(level.name()); - this.fetchLogger.setLevel(level.name()); + Loggers.setLevel(queryLogger, level.name()); + Loggers.setLevel(fetchLogger, level.name()); } @Override public void onQueryPhase(SearchContext context, long tookInNanos) { diff --git a/core/src/main/java/org/elasticsearch/index/analysis/Analysis.java b/core/src/main/java/org/elasticsearch/index/analysis/Analysis.java index d2158f707ca..aded2bb4ee9 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/Analysis.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/Analysis.java @@ -19,10 +19,8 @@ package org.elasticsearch.index.analysis; -import org.apache.lucene.analysis.Analyzer; +import org.apache.logging.log4j.Logger; import org.apache.lucene.analysis.CharArraySet; -import org.apache.lucene.analysis.LegacyNumericTokenStream; -import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.ar.ArabicAnalyzer; import org.apache.lucene.analysis.bg.BulgarianAnalyzer; import org.apache.lucene.analysis.br.BrazilianAnalyzer; @@ -54,12 +52,10 @@ import org.apache.lucene.analysis.ro.RomanianAnalyzer; import org.apache.lucene.analysis.ru.RussianAnalyzer; import org.apache.lucene.analysis.sv.SwedishAnalyzer; import org.apache.lucene.analysis.th.ThaiAnalyzer; -import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tr.TurkishAnalyzer; import org.apache.lucene.util.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.FileSystemUtils; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.lucene.Lucene; import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; @@ -70,7 +66,6 @@ import java.io.Reader; import java.nio.charset.CharacterCodingException; import java.nio.charset.StandardCharsets; import java.nio.file.Path; -import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -82,12 +77,9 @@ import java.util.Set; import static java.util.Collections.unmodifiableMap; -/** - * - */ public class Analysis { - public static Version parseAnalysisVersion(Settings indexSettings, Settings settings, ESLogger logger) { + public static Version parseAnalysisVersion(Settings indexSettings, Settings settings, Logger logger) { // check for explicit version on the specific analyzer component String sVersion = settings.get("version"); if (sVersion != null) { diff --git a/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java b/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java index cc3a90c4abc..0e4c54e7a7d 100644 --- a/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java +++ b/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java @@ -19,6 +19,8 @@ package org.elasticsearch.index.cache.bitset; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; @@ -258,7 +260,7 @@ public final class BitsetFilterCache extends AbstractIndexComponent implements L indexShard.warmerService().logger().trace("warmed bitset for [{}], took [{}]", filterToWarm, TimeValue.timeValueNanos(System.nanoTime() - start)); } } catch (Exception e) { - indexShard.warmerService().logger().warn("failed to load bitset for [{}]", e, filterToWarm); + indexShard.warmerService().logger().warn((Supplier) () -> new ParameterizedMessage("failed to load bitset for [{}]", filterToWarm), e); } finally { latch.countDown(); } diff --git a/core/src/main/java/org/elasticsearch/index/codec/CodecService.java b/core/src/main/java/org/elasticsearch/index/codec/CodecService.java index 65fab8d254a..59be64a85d6 100644 --- a/core/src/main/java/org/elasticsearch/index/codec/CodecService.java +++ b/core/src/main/java/org/elasticsearch/index/codec/CodecService.java @@ -19,12 +19,12 @@ package org.elasticsearch.index.codec; +import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode; import org.apache.lucene.codecs.lucene62.Lucene62Codec; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.MapBuilder; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.index.mapper.MapperService; import java.util.Map; @@ -44,7 +44,7 @@ public class CodecService { /** the raw unfiltered lucene default. 
useful for testing */ public static final String LUCENE_DEFAULT_CODEC = "lucene_default"; - public CodecService(@Nullable MapperService mapperService, ESLogger logger) { + public CodecService(@Nullable MapperService mapperService, Logger logger) { final MapBuilder codecs = MapBuilder.newMapBuilder(); if (mapperService == null) { codecs.put(DEFAULT_CODEC, new Lucene62Codec()); diff --git a/core/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java b/core/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java index ec4636e396c..54f15feaa74 100644 --- a/core/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java +++ b/core/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java @@ -19,11 +19,11 @@ package org.elasticsearch.index.codec; +import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat; import org.apache.lucene.codecs.lucene62.Lucene62Codec; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.index.mapper.CompletionFieldMapper; import org.elasticsearch.index.mapper.CompletionFieldMapper2x; @@ -40,14 +40,14 @@ import org.elasticsearch.index.mapper.MapperService; */ // LUCENE UPGRADE: make sure to move to a new codec depending on the lucene version public class PerFieldMappingPostingFormatCodec extends Lucene62Codec { - private final ESLogger logger; + private final Logger logger; private final MapperService mapperService; static { assert Codec.forName(Lucene.LATEST_CODEC).getClass().isAssignableFrom(PerFieldMappingPostingFormatCodec.class) : "PerFieldMappingPostingFormatCodec must subclass the latest lucene codec: " + Lucene.LATEST_CODEC; } - public PerFieldMappingPostingFormatCodec(Lucene50StoredFieldsFormat.Mode compressionMode, MapperService mapperService, ESLogger logger) { + public PerFieldMappingPostingFormatCodec(Lucene50StoredFieldsFormat.Mode compressionMode, MapperService mapperService, Logger logger) { super(compressionMode); this.mapperService = mapperService; this.logger = logger; diff --git a/core/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java b/core/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java index 965a2e58f9c..466da06dec4 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java +++ b/core/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java @@ -19,12 +19,12 @@ package org.elasticsearch.index.engine; +import org.apache.logging.log4j.Logger; import org.apache.lucene.index.ConcurrentMergeScheduler; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.MergePolicy; import org.apache.lucene.index.MergeScheduler; import org.apache.lucene.index.OneMergeHelper; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.metrics.MeanMetric; @@ -34,9 +34,9 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.MergeSchedulerConfig; import 
org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.merge.OnGoingMerge; -import org.elasticsearch.index.MergeSchedulerConfig; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; @@ -50,7 +50,7 @@ import java.util.Set; */ class ElasticsearchConcurrentMergeScheduler extends ConcurrentMergeScheduler { - protected final ESLogger logger; + protected final Logger logger; private final Settings indexSettings; private final ShardId shardId; diff --git a/core/src/main/java/org/elasticsearch/index/engine/Engine.java b/core/src/main/java/org/elasticsearch/index/engine/Engine.java index 4ac09db8dc0..9df03beb1ab 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -19,6 +19,9 @@ package org.elasticsearch.index.engine; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.FilterLeafReader; import org.apache.lucene.index.IndexCommit; @@ -48,7 +51,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.uid.Versions; @@ -84,15 +86,12 @@ import java.util.concurrent.locks.ReentrantLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.Function; -/** - * - */ public abstract class Engine implements Closeable { public static final String SYNC_COMMIT_ID = "sync_id"; protected final ShardId shardId; - protected final ESLogger logger; + protected final Logger logger; protected final EngineConfig engineConfig; protected final Store store; protected final AtomicBoolean isClosed = new AtomicBoolean(false); @@ -364,7 +363,7 @@ public abstract class Engine implements Closeable { throw ex; } catch (Exception ex) { ensureOpen(); // throw EngineCloseException here if we are already closed - logger.error("failed to acquire searcher, source {}", ex, source); + logger.error((Supplier) () -> new ParameterizedMessage("failed to acquire searcher, source {}", source), ex); throw new EngineException(shardId, "failed to acquire searcher, source " + source, ex); } finally { if (!success) { // release the ref in the case of an error... 
@@ -443,8 +442,7 @@ public abstract class Engine implements Closeable { try { directory = engineConfig.getCodec().compoundFormat().getCompoundReader(segmentReader.directory(), segmentCommitInfo.info, IOContext.READ); } catch (IOException e) { - logger.warn("Error when opening compound reader for Directory [{}] and SegmentCommitInfo [{}]", e, - segmentReader.directory(), segmentCommitInfo); + logger.warn((Supplier) () -> new ParameterizedMessage("Error when opening compound reader for Directory [{}] and SegmentCommitInfo [{}]", segmentReader.directory(), segmentCommitInfo), e); return ImmutableOpenMap.of(); } @@ -459,14 +457,16 @@ public abstract class Engine implements Closeable { try { files = directory.listAll(); } catch (IOException e) { - logger.warn("Couldn't list Compound Reader Directory [{}]", e, directory); + final Directory finalDirectory = directory; + logger.warn( + (Supplier) () -> new ParameterizedMessage("Couldn't list Compound Reader Directory [{}]", finalDirectory), e); return ImmutableOpenMap.of(); } } else { try { files = segmentReader.getSegmentInfo().files().toArray(new String[]{}); } catch (IOException e) { - logger.warn("Couldn't list Directory from SegmentReader [{}] and SegmentInfo [{}]", e, segmentReader, segmentReader.getSegmentInfo()); + logger.warn((Supplier) () -> new ParameterizedMessage("Couldn't list Directory from SegmentReader [{}] and SegmentInfo [{}]", segmentReader, segmentReader.getSegmentInfo()), e); return ImmutableOpenMap.of(); } } @@ -480,7 +480,10 @@ public abstract class Engine implements Closeable { } catch (NoSuchFileException | FileNotFoundException e) { logger.warn("Tried to query fileLength but file is gone [{}] [{}]", e, directory, file); } catch (IOException e) { - logger.warn("Error when trying to query fileLength [{}] [{}]", e, directory, file); + final Directory finalDirectory = directory; + logger.warn( + (Supplier) + () -> new ParameterizedMessage("Error when trying to query fileLength [{}] [{}]", finalDirectory, file), e); } if (length == 0L) { continue; @@ -492,7 +495,10 @@ public abstract class Engine implements Closeable { try { directory.close(); } catch (IOException e) { - logger.warn("Error when closing compound reader on Directory [{}]", e, directory); + final Directory finalDirectory = directory; + logger.warn( + (Supplier) + () -> new ParameterizedMessage("Error when closing compound reader on Directory [{}]", finalDirectory), e); } } @@ -527,7 +533,7 @@ public abstract class Engine implements Closeable { try { segment.sizeInBytes = info.sizeInBytes(); } catch (IOException e) { - logger.trace("failed to get size for [{}]", e, info.info.name); + logger.trace((Supplier) () -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e); } final SegmentReader segmentReader = segmentReader(reader.reader()); segment.memoryInBytes = segmentReader.ramBytesUsed(); @@ -557,7 +563,7 @@ public abstract class Engine implements Closeable { try { segment.sizeInBytes = info.sizeInBytes(); } catch (IOException e) { - logger.trace("failed to get size for [{}]", e, info.info.name); + logger.trace((Supplier) () -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e); } segments.put(info.info.name, segment); } else { @@ -669,10 +675,10 @@ public abstract class Engine implements Closeable { closeNoLock("engine failed on: [" + reason + "]"); } finally { if (failedEngine != null) { - logger.debug("tried to fail engine but engine is already failed. ignoring. 
[{}]", failure, reason); + logger.debug((Supplier) () -> new ParameterizedMessage("tried to fail engine but engine is already failed. ignoring. [{}]", reason), failure); return; } - logger.warn("failed engine [{}]", failure, reason); + logger.warn((Supplier) () -> new ParameterizedMessage("failed engine [{}]", reason), failure); // we must set a failure exception, generate one if not supplied failedEngine = (failure != null) ? failure : new IllegalStateException(reason); // we first mark the store as corrupted before we notify any listeners @@ -696,7 +702,7 @@ public abstract class Engine implements Closeable { store.decRef(); } } else { - logger.debug("tried to fail engine but could not acquire lock - engine should be failed by now [{}]", failure, reason); + logger.debug((Supplier) () -> new ParameterizedMessage("tried to fail engine but could not acquire lock - engine should be failed by now [{}]", reason), failure); } } @@ -847,19 +853,24 @@ public abstract class Engine implements Closeable { public static class Index extends Operation { private final ParsedDocument doc; + private final long autoGeneratedIdTimestamp; + private final boolean isRetry; private boolean created; - public Index(Term uid, ParsedDocument doc, long version, VersionType versionType, Origin origin, long startTime) { + public Index(Term uid, ParsedDocument doc, long version, VersionType versionType, Origin origin, long startTime, + long autoGeneratedIdTimestamp, boolean isRetry) { super(uid, version, versionType, origin, startTime); this.doc = doc; + this.isRetry = isRetry; + this.autoGeneratedIdTimestamp = autoGeneratedIdTimestamp; } public Index(Term uid, ParsedDocument doc) { this(uid, doc, Versions.MATCH_ANY); - } + } // TEST ONLY - public Index(Term uid, ParsedDocument doc, long version) { - this(uid, doc, version, VersionType.INTERNAL, Origin.PRIMARY, System.nanoTime()); + Index(Term uid, ParsedDocument doc, long version) { + this(uid, doc, version, VersionType.INTERNAL, Origin.PRIMARY, System.nanoTime(), -1, false); } public ParsedDocument parsedDoc() { @@ -919,6 +930,23 @@ public abstract class Engine implements Closeable { return (id().length() + type().length()) * 2 + source().length() + 12; } + /** + * Returns a positive timestamp if the ID of this document is auto-generated by elasticsearch. + * if this property is non-negative indexing code might optimize the addition of this document + * due to it's append only nature. + */ + public long getAutoGeneratedIdTimestamp() { + return autoGeneratedIdTimestamp; + } + + /** + * Returns true if this index requests has been retried on the coordinating node and can therefor be delivered + * multiple times. 
Note: this might also be set to true if an equivalent event occurred, like the replay of the transaction log + */ + public boolean isRetry() { + return isRetry; + } + } public static class Delete extends Operation { diff --git a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java index 13408408e7e..9f9d2186a83 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java +++ b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java @@ -25,6 +25,7 @@ import org.apache.lucene.index.SnapshotDeletionPolicy; import org.apache.lucene.search.QueryCache; import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.similarities.Similarity; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -65,6 +66,7 @@ public final class EngineConfig { private final Engine.EventListener eventListener; private final QueryCache queryCache; private final QueryCachingPolicy queryCachingPolicy; + private final long maxUnsafeAutoIdTimestamp; @Nullable private final RefreshListeners refreshListeners; @@ -89,7 +91,17 @@ public final class EngineConfig { } }, Property.IndexScope, Property.NodeScope); - private TranslogConfig translogConfig; + /** + * Configures an index to optimize documents with auto-generated IDs for append-only indexing. An update of this setting from false + * to true might not take effect immediately. In other words, disabling the optimization is applied immediately, while + * re-enabling it might not be applied until the engine is in a safe state to do so. Depending on the engine implementation, a change to + * this setting that re-enables the optimization might not be reflected until the engine is restarted or the index is closed and reopened.
+ * The default is true. + */ + public static final Setting INDEX_OPTIMIZE_AUTO_GENERATED_IDS = Setting.boolSetting("index.optimize_auto_generated_id", true, + Property.IndexScope, Property.Dynamic); + + private final TranslogConfig translogConfig; private final OpenMode openMode; /** @@ -97,10 +109,11 @@ */ public EngineConfig(OpenMode openMode, ShardId shardId, ThreadPool threadPool, IndexSettings indexSettings, Engine.Warmer warmer, Store store, SnapshotDeletionPolicy deletionPolicy, - MergePolicy mergePolicy,Analyzer analyzer, + MergePolicy mergePolicy, Analyzer analyzer, Similarity similarity, CodecService codecService, Engine.EventListener eventListener, TranslogRecoveryPerformer translogRecoveryPerformer, QueryCache queryCache, QueryCachingPolicy queryCachingPolicy, - TranslogConfig translogConfig, TimeValue flushMergesAfter, RefreshListeners refreshListeners) { + TranslogConfig translogConfig, TimeValue flushMergesAfter, RefreshListeners refreshListeners, + long maxUnsafeAutoIdTimestamp) { if (openMode == null) { throw new IllegalArgumentException("openMode must not be null"); } @@ -127,6 +140,9 @@ public final class EngineConfig { this.flushMergesAfter = flushMergesAfter; this.openMode = openMode; this.refreshListeners = refreshListeners; + assert maxUnsafeAutoIdTimestamp >= IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP : + "maxUnsafeAutoIdTimestamp must be >= -1 but was " + maxUnsafeAutoIdTimestamp; + this.maxUnsafeAutoIdTimestamp = maxUnsafeAutoIdTimestamp; } /** @@ -311,4 +327,12 @@ public final class EngineConfig { public RefreshListeners getRefreshListeners() { return refreshListeners; } + + /** + * Returns the max timestamp that is used to de-optimize documents with auto-generated IDs in the engine. + * This is used to ensure we don't add duplicate documents when we assume an append-only case based on auto-generated IDs. + */ + public long getMaxUnsafeAutoIdTimestamp() { + return indexSettings.getValue(INDEX_OPTIMIZE_AUTO_GENERATED_IDS) ?
maxUnsafeAutoIdTimestamp : Long.MAX_VALUE; + } } diff --git a/core/src/main/java/org/elasticsearch/index/engine/EngineSearcher.java b/core/src/main/java/org/elasticsearch/index/engine/EngineSearcher.java index b32d4aa0bb8..a53ac1dd415 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/EngineSearcher.java +++ b/core/src/main/java/org/elasticsearch/index/engine/EngineSearcher.java @@ -19,10 +19,10 @@ package org.elasticsearch.index.engine; +import org.apache.logging.log4j.Logger; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.SearcherManager; import org.apache.lucene.store.AlreadyClosedException; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.index.store.Store; import java.io.IOException; @@ -35,9 +35,9 @@ public class EngineSearcher extends Engine.Searcher { private final SearcherManager manager; private final AtomicBoolean released = new AtomicBoolean(false); private final Store store; - private final ESLogger logger; + private final Logger logger; - public EngineSearcher(String source, IndexSearcher searcher, SearcherManager manager, Store store, ESLogger logger) { + public EngineSearcher(String source, IndexSearcher searcher, SearcherManager manager, Store store, Logger logger) { super(source, searcher); this.manager = manager; this.store = store; diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index b12bfe98bbe..774465fb71a 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.engine; +import org.apache.logging.log4j.Logger; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexFormatTooOldException; @@ -41,18 +42,21 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.InfoStream; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.Version; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.lucene.LoggerInfoStream; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.KeyedLock; import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.merge.OnGoingMerge; @@ -73,6 +77,7 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; import java.util.function.Function; @@ -115,10 +120,19 @@ public class InternalEngine extends Engine { private final AtomicInteger 
throttleRequestCount = new AtomicInteger(); private final EngineConfig.OpenMode openMode; private final AtomicBoolean allowCommits = new AtomicBoolean(true); + private final AtomicLong maxUnsafeAutoIdTimestamp = new AtomicLong(-1); + private final CounterMetric numVersionLookups = new CounterMetric(); + private final CounterMetric numIndexVersionsLookups = new CounterMetric(); public InternalEngine(EngineConfig engineConfig) throws EngineException { super(engineConfig); openMode = engineConfig.getOpenMode(); + if (engineConfig.getIndexSettings().getIndexVersionCreated().before(Version.V_5_0_0_alpha6)) { + // no optimization for pre 5.0.0.alpha6 since translog might not have all information needed + maxUnsafeAutoIdTimestamp.set(Long.MAX_VALUE); + } else { + maxUnsafeAutoIdTimestamp.set(engineConfig.getMaxUnsafeAutoIdTimestamp()); + } this.versionMap = new LiveVersionMap(); store.incRef(); IndexWriter writer = null; @@ -407,30 +421,106 @@ public class InternalEngine extends Engine { } } + private boolean canOptimizeAddDocument(Index index) { + if (index.getAutoGeneratedIdTimestamp() != IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP) { + assert index.getAutoGeneratedIdTimestamp() >= 0 : "autoGeneratedIdTimestamp must be positive but was: " + + index.getAutoGeneratedIdTimestamp(); + switch (index.origin()) { + case PRIMARY: + assert (index.version() == Versions.MATCH_ANY && index.versionType() == VersionType.INTERNAL) + : "version: " + index.version() + " type: " + index.versionType(); + return true; + case PEER_RECOVERY: + case REPLICA: + assert index.version() == 1 && index.versionType() == VersionType.EXTERNAL + : "version: " + index.version() + " type: " + index.versionType(); + return true; + case LOCAL_TRANSLOG_RECOVERY: + assert index.isRetry(); + return false; // even if retry is set we never optimize local recovery + default: + throw new IllegalArgumentException("unknown origin " + index.origin()); + } + } + return false; + } + private void innerIndex(Index index) throws IOException { try (Releasable ignored = acquireLock(index.uid())) { lastWriteNanos = index.startTime(); - final long currentVersion; + /* if we have an autoGeneratedID that comes into the engine we can potentially optimize + * and just use addDocument instead of updateDocument and skip the entire version and index lookup across the board. + * Yet, we have to deal with multiple document delivery; for this we use a property of the document that is added + * to detect if it has potentially been added before. We use the document's timestamp for this since it's something + * that: + * - doesn't change per document + * - is preserved in the transaction log + * - and is assigned before we start to index / replicate + * NOTE: it's not important for this timestamp to be consistent across nodes etc. it's just a number that is in the common + * case increasing and can be used in the failure case when we retry and resend documents to establish a happens-before relationship. + * For instance: + * - doc A has autoGeneratedIdTimestamp = 10, isRetry = false + * - doc B has autoGeneratedIdTimestamp = 9, isRetry = false + * + * while both docs are in flight, we disconnect on one node, reconnect and send doc A again + * - now doc A' has autoGeneratedIdTimestamp = 10, isRetry = true + * + * if A' arrives on the shard first we update maxUnsafeAutoIdTimestamp to 10 and use updateDocument. All subsequent + * documents that arrive (A and B) will also use updateDocument since their timestamps are less than maxUnsafeAutoIdTimestamp.
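A minimal standalone sketch of the compare-and-set bookkeeping this comment describes; the class and method names here are hypothetical, but the field name and the retry/first-delivery split follow the diff:

import java.util.concurrent.atomic.AtomicLong;

class AutoIdDeOptimizer {
    // highest auto-generated-ID timestamp seen on a retried index request; -1 means no retry observed yet
    private final AtomicLong maxUnsafeAutoIdTimestamp = new AtomicLong(-1);

    /** Returns true when the document must go through updateDocument instead of addDocument. */
    boolean mustDeOptimize(long autoGeneratedIdTimestamp, boolean isRetry) {
        if (isRetry) {
            // publish the timestamp so any in-flight or later delivery with a smaller
            // or equal timestamp also falls back to updateDocument
            long current = maxUnsafeAutoIdTimestamp.get();
            while (current < autoGeneratedIdTimestamp
                    && maxUnsafeAutoIdTimestamp.compareAndSet(current, autoGeneratedIdTimestamp) == false) {
                current = maxUnsafeAutoIdTimestamp.get();
            }
            return true; // a retry itself is always treated as an update
        }
        // first delivery: unsafe only if a retry already covered this timestamp
        return maxUnsafeAutoIdTimestamp.get() >= autoGeneratedIdTimestamp;
    }
}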
+ * While this is not strictly needed for doc B, it is just much simpler to implement since it will just de-optimize some doc in the worst case. + * + * if A arrives on the shard first we use addDocument since maxUnsafeAutoIdTimestamp is < 10. A' will then just be skipped or call + * updateDocument. + */ + long currentVersion; final boolean deleted; - final VersionValue versionValue = versionMap.getUnderLock(index.uid()); - if (versionValue == null) { - currentVersion = loadCurrentVersionFromIndex(index.uid()); - deleted = currentVersion == Versions.NOT_FOUND; + // if anything is fishy here, i.e. there is a retry, we go and force updateDocument below so we are updating the document in the + // lucene index without checking the version map but we still do the version check + final boolean forceUpdateDocument; + if (canOptimizeAddDocument(index)) { + long deOptimizeTimestamp = maxUnsafeAutoIdTimestamp.get(); + if (index.isRetry()) { + forceUpdateDocument = true; + do { + deOptimizeTimestamp = maxUnsafeAutoIdTimestamp.get(); + if (deOptimizeTimestamp >= index.getAutoGeneratedIdTimestamp()) { + break; + } + } while(maxUnsafeAutoIdTimestamp.compareAndSet(deOptimizeTimestamp, + index.getAutoGeneratedIdTimestamp()) == false); + assert maxUnsafeAutoIdTimestamp.get() >= index.getAutoGeneratedIdTimestamp(); + } else { + // in this case we force + forceUpdateDocument = deOptimizeTimestamp >= index.getAutoGeneratedIdTimestamp(); + } + currentVersion = Versions.NOT_FOUND; + deleted = true; } else { - currentVersion = checkDeletedAndGCed(versionValue); - deleted = versionValue.delete(); + // update the document + forceUpdateDocument = false; // we don't force it - it depends on the version + final VersionValue versionValue = versionMap.getUnderLock(index.uid()); + assert incrementVersionLookup(); + if (versionValue == null) { + currentVersion = loadCurrentVersionFromIndex(index.uid()); + deleted = currentVersion == Versions.NOT_FOUND; + } else { + currentVersion = checkDeletedAndGCed(versionValue); + deleted = versionValue.delete(); + } } - final long expectedVersion = index.version(); if (checkVersionConflict(index, currentVersion, expectedVersion, deleted)) { index.setCreated(false); return; } - final long updatedVersion = updateVersion(index, currentVersion, expectedVersion); - - indexOrUpdate(index, currentVersion, versionValue); - + index.setCreated(deleted); + if (currentVersion == Versions.NOT_FOUND && forceUpdateDocument == false) { + // document does not exist, we can optimize for create + index(index, indexWriter); + } else { + update(index, indexWriter); + } maybeAddToTranslog(index, updatedVersion, Translog.Index::new, NEW_VERSION_VALUE); } } @@ -441,16 +531,6 @@ public class InternalEngine extends Engine { return updatedVersion; } - private void indexOrUpdate(final Index index, final long currentVersion, final VersionValue versionValue) throws IOException { - if (currentVersion == Versions.NOT_FOUND) { - // document does not exists, we can optimize for create - index.setCreated(true); - index(index, indexWriter); - } else { - update(index, versionValue, indexWriter); - } - } - private static void index(final Index index, final IndexWriter indexWriter) throws IOException { if (index.docs().size() > 1) { indexWriter.addDocuments(index.docs()); @@ -459,12 +539,7 @@ public class InternalEngine extends Engine { } } - private static void update(final Index index, final VersionValue versionValue, final IndexWriter indexWriter) throws IOException { -
index.setCreated(versionValue.delete()); // we have a delete which is not GC'ed... - } else { - index.setCreated(false); - } + private static void update(final Index index, final IndexWriter indexWriter) throws IOException { if (index.docs().size() > 1) { indexWriter.updateDocuments(index.uid(), index.docs()); } else { @@ -504,6 +579,7 @@ final long currentVersion; final boolean deleted; final VersionValue versionValue = versionMap.getUnderLock(delete.uid()); + assert incrementVersionLookup(); if (versionValue == null) { currentVersion = loadCurrentVersionFromIndex(delete.uid()); deleted = currentVersion == Versions.NOT_FOUND; @@ -914,6 +990,7 @@ protected final void writerSegmentStats(SegmentsStats stats) { stats.addVersionMapMemoryInBytes(versionMap.ramBytesUsed()); stats.addIndexWriterMemoryInBytes(indexWriter.ramBytesUsed()); + stats.updateMaxUnsafeAutoIdTimestamp(maxUnsafeAutoIdTimestamp.get()); } @Override @@ -996,6 +1073,7 @@ } private long loadCurrentVersionFromIndex(Term uid) throws IOException { + assert incrementIndexVersionLookup(); try (final Searcher searcher = acquireSearcher("load_version")) { return Versions.loadVersion(searcher.reader(), uid); } @@ -1034,10 +1112,10 @@ /** Extended SearcherFactory that warms the segments if needed when acquiring a new searcher */ static final class SearchFactory extends EngineSearcherFactory { private final Engine.Warmer warmer; - private final ESLogger logger; + private final Logger logger; private final AtomicBoolean isEngineClosed; - SearchFactory(ESLogger logger, AtomicBoolean isEngineClosed, EngineConfig engineConfig) { + SearchFactory(Logger logger, AtomicBoolean isEngineClosed, EngineConfig engineConfig) { super(engineConfig); warmer = engineConfig.getWarmer(); this.logger = logger; @@ -1222,6 +1300,12 @@ mergeScheduler.refreshConfig(); // config().isEnableGcDeletes() or config.getGcDeletesInMillis() may have changed: maybePruneDeletedTombstones(); + if (engineConfig.getMaxUnsafeAutoIdTimestamp() == Long.MAX_VALUE) { + // this is an anti-viral setting; you can only opt out for the entire index + // only if a shard starts up again due to relocation or if the index is closed + // the setting will be re-interpreted if it's set to true + this.maxUnsafeAutoIdTimestamp.set(Long.MAX_VALUE); + } } public MergeStats getMergeStats() { @@ -1234,4 +1318,39 @@ final int maxDoc = indexWriter.maxDoc(); return new DocsStats(numDocs, maxDoc-numDocs); } + + + /** + * Returns the number of times a version was looked up from the index. + * Note this is only available if assertions are enabled. + */ + long getNumIndexVersionsLookups() { // for testing + return numIndexVersionsLookups.count(); + } + + /** + * Returns the number of times a version was looked up either from memory or from the index. + * Note this is only available if assertions are enabled. + */ + long getNumVersionLookups() { // for testing + return numVersionLookups.count(); + } + + private boolean incrementVersionLookup() { // only used by asserts + numVersionLookups.inc(); + return true; + } + + private boolean incrementIndexVersionLookup() { + numIndexVersionsLookups.inc(); + return true; + } + + /** + * Returns true iff the index writer has any deletions either buffered in memory or + * in the index.
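The version-lookup counters above rely on a small idiom worth noting: an always-true helper invoked inside an assert statement, so the bookkeeping runs only when the JVM is started with -ea and the hot indexing path pays nothing in production. A minimal sketch, using java.util.concurrent.atomic.LongAdder as a stand-in for Elasticsearch's internal CounterMetric:

import java.util.concurrent.atomic.LongAdder;

class VersionLookupStats {
    private final LongAdder numVersionLookups = new LongAdder(); // stand-in for CounterMetric

    // always returns true so the call can live inside an assert statement;
    // without -ea the assert (and therefore the increment) is skipped entirely
    private boolean incrementVersionLookup() {
        numVersionLookups.increment();
        return true;
    }

    void resolveVersion() {
        assert incrementVersionLookup(); // counted only when assertions are enabled
        // ... the actual version map / index lookup would happen here ...
    }

    long getNumVersionLookups() { // for testing; meaningful only with assertions enabled
        return numVersionLookups.sum();
    }
}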
+ */ + boolean indexWriterHasDeletions() { + return indexWriter.hasDeletions(); + } } diff --git a/core/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java b/core/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java index 8f0388aef05..637beebfec8 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java +++ b/core/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java @@ -44,6 +44,7 @@ public class SegmentsStats implements Streamable, ToXContent { private long docValuesMemoryInBytes; private long indexWriterMemoryInBytes; private long versionMapMemoryInBytes; + private long maxUnsafeAutoIdTimestamp = Long.MIN_VALUE; private long bitsetMemoryInBytes; private ImmutableOpenMap fileSizes = ImmutableOpenMap.of(); @@ -114,6 +115,10 @@ public class SegmentsStats implements Streamable, ToXContent { this.versionMapMemoryInBytes += versionMapMemoryInBytes; } + void updateMaxUnsafeAutoIdTimestamp(long maxUnsafeAutoIdTimestamp) { + this.maxUnsafeAutoIdTimestamp = Math.max(maxUnsafeAutoIdTimestamp, this.maxUnsafeAutoIdTimestamp); + } + public void addBitsetMemoryInBytes(long bitsetMemoryInBytes) { this.bitsetMemoryInBytes += bitsetMemoryInBytes; } @@ -138,6 +143,7 @@ public class SegmentsStats implements Streamable, ToXContent { if (mergeStats == null) { return; } + updateMaxUnsafeAutoIdTimestamp(mergeStats.maxUnsafeAutoIdTimestamp); add(mergeStats.count, mergeStats.memoryInBytes); addTermsMemoryInBytes(mergeStats.termsMemoryInBytes); addStoredFieldsMemoryInBytes(mergeStats.storedFieldsMemoryInBytes); @@ -272,6 +278,14 @@ public class SegmentsStats implements Streamable, ToXContent { return fileSizes; } + /** + * Returns the max timestamp that is used to de-optimize documents with auto-generated IDs in the engine. 
+ * This is used to ensure we don't add duplicate documents when we assume an append-only case based on auto-generated IDs. + */ + public long getMaxUnsafeAutoIdTimestamp() { + return maxUnsafeAutoIdTimestamp; + } + public static SegmentsStats readSegmentsStats(StreamInput in) throws IOException { SegmentsStats stats = new SegmentsStats(); stats.readFrom(in); @@ -292,6 +306,7 @@ builder.byteSizeField(Fields.INDEX_WRITER_MEMORY_IN_BYTES, Fields.INDEX_WRITER_MEMORY, indexWriterMemoryInBytes); builder.byteSizeField(Fields.VERSION_MAP_MEMORY_IN_BYTES, Fields.VERSION_MAP_MEMORY, versionMapMemoryInBytes); builder.byteSizeField(Fields.FIXED_BIT_SET_MEMORY_IN_BYTES, Fields.FIXED_BIT_SET, bitsetMemoryInBytes); + builder.field(Fields.MAX_UNSAFE_AUTO_ID_TIMESTAMP, maxUnsafeAutoIdTimestamp); builder.startObject(Fields.FILE_SIZES); for (Iterator> it = fileSizes.iterator(); it.hasNext();) { ObjectObjectCursor entry = it.next(); @@ -326,6 +341,7 @@ static final String INDEX_WRITER_MEMORY_IN_BYTES = "index_writer_memory_in_bytes"; static final String VERSION_MAP_MEMORY = "version_map_memory"; static final String VERSION_MAP_MEMORY_IN_BYTES = "version_map_memory_in_bytes"; + static final String MAX_UNSAFE_AUTO_ID_TIMESTAMP = "max_unsafe_auto_id_timestamp"; static final String FIXED_BIT_SET = "fixed_bit_set"; static final String FIXED_BIT_SET_MEMORY_IN_BYTES = "fixed_bit_set_memory_in_bytes"; static final String FILE_SIZES = "file_sizes"; @@ -347,6 +363,7 @@ indexWriterMemoryInBytes = in.readLong(); versionMapMemoryInBytes = in.readLong(); bitsetMemoryInBytes = in.readLong(); + maxUnsafeAutoIdTimestamp = in.readLong(); int size = in.readVInt(); ImmutableOpenMap.Builder map = ImmutableOpenMap.builder(size); @@ -371,6 +388,7 @@ out.writeLong(indexWriterMemoryInBytes); out.writeLong(versionMapMemoryInBytes); out.writeLong(bitsetMemoryInBytes); + out.writeLong(maxUnsafeAutoIdTimestamp); out.writeVInt(fileSizes.size()); for (Iterator> it = fileSizes.iterator(); it.hasNext();) { diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsBuilder.java b/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsBuilder.java index 2fa4476c0d9..aaecf2fa896 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsBuilder.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.fielddata.ordinals; +import org.apache.logging.log4j.Logger; import org.apache.lucene.index.DocValues; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.MultiDocValues.OrdinalMap; @@ -26,7 +27,6 @@ import org.apache.lucene.index.RandomAccessOrds; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.packed.PackedInts; import org.elasticsearch.common.breaker.CircuitBreaker; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.fielddata.AtomicOrdinalsFieldData; @@ -48,7 +48,7 @@ public enum GlobalOrdinalsBuilder { /** * Build global ordinals for the provided {@link IndexReader}.
*/ - public static IndexOrdinalsFieldData build(final IndexReader indexReader, IndexOrdinalsFieldData indexFieldData, IndexSettings indexSettings, CircuitBreakerService breakerService, ESLogger logger) throws IOException { + public static IndexOrdinalsFieldData build(final IndexReader indexReader, IndexOrdinalsFieldData indexFieldData, IndexSettings indexSettings, CircuitBreakerService breakerService, Logger logger) throws IOException { assert indexReader.leaves().size() > 1; long startTimeNS = System.nanoTime(); diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java index 83bdaf221b0..46218763996 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java @@ -19,10 +19,9 @@ package org.elasticsearch.index.fielddata.plain; +import org.apache.logging.log4j.Logger; import org.apache.lucene.index.IndexReader; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.fielddata.IndexFieldData; @@ -34,7 +33,6 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.UidFieldMapper; import org.elasticsearch.indices.breaker.CircuitBreakerService; -import java.util.Map; import java.util.Set; import static java.util.Collections.unmodifiableSet; @@ -45,7 +43,7 @@ public abstract class DocValuesIndexFieldData { protected final Index index; protected final String fieldName; - protected final ESLogger logger; + protected final Logger logger; public DocValuesIndexFieldData(Index index, String fieldName) { super(); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/LegacyIpIndexFieldData.java b/core/src/main/java/org/elasticsearch/index/mapper/LegacyIpIndexFieldData.java index ffad4deeb59..feb3328227d 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/LegacyIpIndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/LegacyIpIndexFieldData.java @@ -18,33 +18,33 @@ */ package org.elasticsearch.index.mapper; -import java.io.IOException; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.nio.ByteBuffer; - +import org.apache.logging.log4j.Logger; import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.index.DocValues; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.index.Index; import org.elasticsearch.index.fielddata.AtomicFieldData; import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; -import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource; import org.elasticsearch.search.MultiValueMode; +import java.io.IOException; 
+import java.net.InetAddress; +import java.net.UnknownHostException; +import java.nio.ByteBuffer; + final class LegacyIpIndexFieldData implements IndexFieldData { protected final Index index; protected final String fieldName; - protected final ESLogger logger; + protected final Logger logger; public LegacyIpIndexFieldData(Index index, String fieldName) { this.index = index; diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 43bf505da4c..148fde7b648 100755 --- a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -21,6 +21,7 @@ package org.elasticsearch.index.mapper; import com.carrotsearch.hppc.ObjectHashSet; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.DelegatingAnalyzerWrapper; import org.elasticsearch.ElasticsearchGenerationException; @@ -216,7 +217,10 @@ public class MapperService extends AbstractIndexComponent { } } } catch (Exception e) { - logger.warn("[{}] failed to add mapping [{}], source [{}]", e, index(), mappingType, mappingSource); + logger.warn( + (org.apache.logging.log4j.util.Supplier) + () -> new ParameterizedMessage("[{}] failed to add mapping [{}], source [{}]", index(), mappingType, mappingSource), + e); throw e; } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java b/core/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java index 05b1746b742..85367e624d8 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java @@ -50,7 +50,8 @@ public class ParsedDocument { private String parent; - public ParsedDocument(Field version, String id, String type, String routing, long timestamp, long ttl, List documents, BytesReference source, Mapping dynamicMappingsUpdate) { + public ParsedDocument(Field version, String id, String type, String routing, long timestamp, long ttl, List documents, + BytesReference source, Mapping dynamicMappingsUpdate) { this.version = version; this.id = id; this.type = type; diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java index bcad6aa063e..3608da30f76 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java @@ -244,7 +244,7 @@ public class ScaledFloatFieldMapper extends FieldMapper { lo = Math.round(Math.ceil(dValue * scalingFactor)); } Long hi = null; - if (lowerTerm != null) { + if (upperTerm != null) { double dValue = NumberFieldMapper.NumberType.DOUBLE.parse(upperTerm).doubleValue(); if (includeUpper == false) { dValue = Math.nextDown(dValue); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/StringFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/StringFieldMapper.java index 5dc6efb28f7..ec7a90148ad 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/StringFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/StringFieldMapper.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.mapper; +import org.apache.logging.log4j.Logger; import org.apache.lucene.document.Field; 
import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.IndexOptions; @@ -26,7 +27,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -187,7 +187,7 @@ public class StringFieldMapper extends FieldMapper { private final DeprecationLogger deprecationLogger; public TypeParser() { - ESLogger logger = Loggers.getLogger(getClass()); + Logger logger = Loggers.getLogger(getClass()); this.deprecationLogger = new DeprecationLogger(logger); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java b/core/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java index e388c8ea576..eaa97ac5100 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java @@ -22,7 +22,6 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.index.IndexOptions; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.joda.FormatDateTimeFormatter; import org.elasticsearch.common.joda.Joda; import org.elasticsearch.common.logging.DeprecationLogger; @@ -31,7 +30,6 @@ import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.similarity.SimilarityProvider; -import org.elasticsearch.index.similarity.SimilarityService; import java.util.Arrays; import java.util.Collections; diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryParseContext.java b/core/src/main/java/org/elasticsearch/index/query/QueryParseContext.java index 95fe0094bad..9ed374db212 100644 --- a/core/src/main/java/org/elasticsearch/index/query/QueryParseContext.java +++ b/core/src/main/java/org/elasticsearch/index/query/QueryParseContext.java @@ -25,8 +25,11 @@ import org.elasticsearch.common.ParseFieldMatcherSupplier; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.indices.query.IndicesQueriesRegistry; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptSettings; import java.io.IOException; import java.util.Objects; @@ -42,11 +45,18 @@ public class QueryParseContext implements ParseFieldMatcherSupplier { private final XContentParser parser; private final IndicesQueriesRegistry indicesQueriesRegistry; private final ParseFieldMatcher parseFieldMatcher; + private final String defaultScriptLanguage; public QueryParseContext(IndicesQueriesRegistry registry, XContentParser parser, ParseFieldMatcher parseFieldMatcher) { + this(Script.DEFAULT_SCRIPT_LANG, registry, parser, parseFieldMatcher); + } + + public QueryParseContext(String defaultScriptLanguage, IndicesQueriesRegistry registry, XContentParser parser, + ParseFieldMatcher parseFieldMatcher) { this.indicesQueriesRegistry = Objects.requireNonNull(registry, "indices queries registry cannot be 
null"); this.parser = Objects.requireNonNull(parser, "parser cannot be null"); this.parseFieldMatcher = Objects.requireNonNull(parseFieldMatcher, "parse field matcher cannot be null"); + this.defaultScriptLanguage = defaultScriptLanguage; } public XContentParser parser() { @@ -127,4 +137,12 @@ public class QueryParseContext implements ParseFieldMatcherSupplier { public ParseFieldMatcher getParseFieldMatcher() { return parseFieldMatcher; } + + /** + * Returns the default scripting language, that should be used if scripts don't specify the script language + * explicitly. + */ + public String getDefaultScriptLanguage() { + return defaultScriptLanguage; + } } diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java b/core/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java index d8ec9ef2a47..f12605088e6 100644 --- a/core/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java +++ b/core/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java @@ -28,6 +28,7 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.indices.query.IndicesQueriesRegistry; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptSettings; /** * Context object used to rewrite {@link QueryBuilder} instances into simplified version. @@ -101,9 +102,18 @@ public class QueryRewriteContext implements ParseFieldMatcherSupplier { /** * Returns a new {@link QueryParseContext} that wraps the provided parser, using the ParseFieldMatcher settings that - * are configured in the index settings + * are configured in the index settings. The default script language will always default to Painless. */ public QueryParseContext newParseContext(XContentParser parser) { return new QueryParseContext(indicesQueriesRegistry, parser, indexSettings.getParseFieldMatcher()); } + + /** + * Returns a new {@link QueryParseContext} like {@link #newParseContext(XContentParser)} with the only diffence, that + * the default script language will default to what has been set in the 'script.legacy.default_lang' setting. 
+ */ + public QueryParseContext newParseContextWithLegacyScriptLanguage(XContentParser parser) { + String defaultScriptLanguage = ScriptSettings.getLegacyDefaultLang(indexSettings.getNodeSettings()); + return new QueryParseContext(defaultScriptLanguage, indicesQueriesRegistry, parser, indexSettings.getParseFieldMatcher()); + } } diff --git a/core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java index e6e902e68f8..3ff924b28db 100644 --- a/core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java @@ -106,7 +106,7 @@ public class ScriptQueryBuilder extends AbstractQueryBuilder // skip } else if (token == XContentParser.Token.START_OBJECT) { if (parseContext.getParseFieldMatcher().match(currentFieldName, ScriptField.SCRIPT)) { - script = Script.parse(parser, parseContext.getParseFieldMatcher()); + script = Script.parse(parser, parseContext.getParseFieldMatcher(), parseContext.getDefaultScriptLanguage()); } else { throw new ParsingException(parser.getTokenLocation(), "[script] query does not support [" + currentFieldName + "]"); } @@ -116,7 +116,7 @@ public class ScriptQueryBuilder extends AbstractQueryBuilder } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { boost = parser.floatValue(); } else if (parseContext.getParseFieldMatcher().match(currentFieldName, ScriptField.SCRIPT)) { - script = Script.parse(parser, parseContext.getParseFieldMatcher()); + script = Script.parse(parser, parseContext.getParseFieldMatcher(), parseContext.getDefaultScriptLanguage()); } else { throw new ParsingException(parser.getTokenLocation(), "[script] query does not support [" + currentFieldName + "]"); } diff --git a/core/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java b/core/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java index 4cbf71f2943..e2fbc5955d7 100644 --- a/core/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java @@ -115,7 +115,7 @@ public class ScriptScoreFunctionBuilder extends ScoreFunctionBuilder> delayedOperations; // operations that are delayed due to relocation hand-off private volatile boolean closed; - public IndexShardOperationsLock(ShardId shardId, ESLogger logger, ThreadPool threadPool) { + public IndexShardOperationsLock(ShardId shardId, Logger logger, ThreadPool threadPool) { this.shardId = shardId; this.logger = logger; this.threadPool = threadPool; diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexingOperationListener.java b/core/src/main/java/org/elasticsearch/index/shard/IndexingOperationListener.java index 13ff87d4187..042ddec924e 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexingOperationListener.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexingOperationListener.java @@ -18,7 +18,9 @@ */ package org.elasticsearch.index.shard; -import org.elasticsearch.common.logging.ESLogger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.index.engine.Engine; import java.util.List; @@ -68,9 +70,9 @@ public interface IndexingOperationListener { */ final class 
CompositeListener implements IndexingOperationListener{ private final List listeners; - private final ESLogger logger; + private final Logger logger; - public CompositeListener(List listeners, ESLogger logger) { + public CompositeListener(List listeners, Logger logger) { this.listeners = listeners; this.logger = logger; } @@ -82,7 +84,7 @@ public interface IndexingOperationListener { try { listener.preIndex(operation); } catch (Exception e) { - logger.warn("preIndex listener [{}] failed", e, listener); + logger.warn((Supplier) () -> new ParameterizedMessage("preIndex listener [{}] failed", listener), e); } } return operation; @@ -95,7 +97,7 @@ public interface IndexingOperationListener { try { listener.postIndex(index, created); } catch (Exception e) { - logger.warn("postIndex listener [{}] failed", e, listener); + logger.warn((Supplier) () -> new ParameterizedMessage("postIndex listener [{}] failed", listener), e); } } } @@ -108,7 +110,7 @@ public interface IndexingOperationListener { listener.postIndex(index, ex); } catch (Exception inner) { inner.addSuppressed(ex); - logger.warn("postIndex listener [{}] failed", inner, listener); + logger.warn((Supplier) () -> new ParameterizedMessage("postIndex listener [{}] failed", listener), inner); } } } @@ -120,7 +122,7 @@ public interface IndexingOperationListener { try { listener.preDelete(delete); } catch (Exception e) { - logger.warn("preDelete listener [{}] failed", e, listener); + logger.warn((Supplier) () -> new ParameterizedMessage("preDelete listener [{}] failed", listener), e); } } return delete; @@ -133,7 +135,7 @@ public interface IndexingOperationListener { try { listener.postDelete(delete); } catch (Exception e) { - logger.warn("postDelete listener [{}] failed", e, listener); + logger.warn((Supplier) () -> new ParameterizedMessage("postDelete listener [{}] failed", listener), e); } } } @@ -146,7 +148,7 @@ public interface IndexingOperationListener { listener.postDelete(delete, ex); } catch (Exception inner) { inner.addSuppressed(ex); - logger.warn("postDelete listener [{}] failed", inner, listener); + logger.warn((Supplier) () -> new ParameterizedMessage("postDelete listener [{}] failed", listener), inner); } } } diff --git a/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java b/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java index 76352e79bb4..ca94f1ea961 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java +++ b/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java @@ -19,9 +19,9 @@ package org.elasticsearch.index.shard; +import org.apache.logging.log4j.Logger; import org.apache.lucene.search.ReferenceManager; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.index.translog.Translog; import java.io.IOException; @@ -41,7 +41,7 @@ public final class RefreshListeners implements ReferenceManager.RefreshListener private final IntSupplier getMaxRefreshListeners; private final Runnable forceRefresh; private final Executor listenerExecutor; - private final ESLogger logger; + private final Logger logger; /** * List of refresh listeners. Defaults to null and built on demand because most refresh cycles won't need it. 
Entries are never removed @@ -54,7 +54,7 @@ public final class RefreshListeners implements ReferenceManager.RefreshListener */ private volatile Translog.Location lastRefreshedLocation; - public RefreshListeners(IntSupplier getMaxRefreshListeners, Runnable forceRefresh, Executor listenerExecutor, ESLogger logger) { + public RefreshListeners(IntSupplier getMaxRefreshListeners, Runnable forceRefresh, Executor listenerExecutor, Logger logger) { this.getMaxRefreshListeners = getMaxRefreshListeners; this.forceRefresh = forceRefresh; this.listenerExecutor = listenerExecutor; diff --git a/core/src/main/java/org/elasticsearch/index/shard/SearchOperationListener.java b/core/src/main/java/org/elasticsearch/index/shard/SearchOperationListener.java index 5a4ac1297f7..11723c3d50a 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/SearchOperationListener.java +++ b/core/src/main/java/org/elasticsearch/index/shard/SearchOperationListener.java @@ -18,7 +18,9 @@ */ package org.elasticsearch.index.shard; -import org.elasticsearch.common.logging.ESLogger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.search.internal.SearchContext; import java.util.List; @@ -107,9 +109,9 @@ public interface SearchOperationListener { */ final class CompositeListener implements SearchOperationListener { private final List listeners; - private final ESLogger logger; + private final Logger logger; - public CompositeListener(List listeners, ESLogger logger) { + public CompositeListener(List listeners, Logger logger) { this.listeners = listeners; this.logger = logger; } @@ -120,7 +122,7 @@ public interface SearchOperationListener { try { listener.onPreQueryPhase(searchContext); } catch (Exception e) { - logger.warn("onPreQueryPhase listener [{}] failed", e, listener); + logger.warn((Supplier) () -> new ParameterizedMessage("onPreQueryPhase listener [{}] failed", listener), e); } } } @@ -131,7 +133,7 @@ public interface SearchOperationListener { try { listener.onFailedQueryPhase(searchContext); } catch (Exception e) { - logger.warn("onFailedQueryPhase listener [{}] failed", e, listener); + logger.warn((Supplier) () -> new ParameterizedMessage("onFailedQueryPhase listener [{}] failed", listener), e); } } } @@ -142,7 +144,7 @@ public interface SearchOperationListener { try { listener.onQueryPhase(searchContext, tookInNanos); } catch (Exception e) { - logger.warn("onQueryPhase listener [{}] failed", e, listener); + logger.warn((Supplier) () -> new ParameterizedMessage("onQueryPhase listener [{}] failed", listener), e); } } } @@ -153,7 +155,7 @@ public interface SearchOperationListener { try { listener.onPreFetchPhase(searchContext); } catch (Exception e) { - logger.warn("onPreFetchPhase listener [{}] failed", e, listener); + logger.warn((Supplier) () -> new ParameterizedMessage("onPreFetchPhase listener [{}] failed", listener), e); } } } @@ -164,7 +166,7 @@ public interface SearchOperationListener { try { listener.onFailedFetchPhase(searchContext); } catch (Exception e) { - logger.warn("onFailedFetchPhase listener [{}] failed", e, listener); + logger.warn((Supplier) () -> new ParameterizedMessage("onFailedFetchPhase listener [{}] failed", listener), e); } } } @@ -175,7 +177,7 @@ public interface SearchOperationListener { try { listener.onFetchPhase(searchContext, tookInNanos); } catch (Exception e) { - logger.warn("onFetchPhase listener [{}] failed", e, listener); + logger.warn((Supplier) () -> 
new ParameterizedMessage("onFetchPhase listener [{}] failed", listener), e); } } } @@ -186,7 +188,7 @@ public interface SearchOperationListener { try { listener.onNewContext(context); } catch (Exception e) { - logger.warn("onNewContext listener [{}] failed", e, listener); + logger.warn((Supplier) () -> new ParameterizedMessage("onNewContext listener [{}] failed", listener), e); } } } @@ -197,7 +199,7 @@ public interface SearchOperationListener { try { listener.onFreeContext(context); } catch (Exception e) { - logger.warn("onFreeContext listener [{}] failed", e, listener); + logger.warn((Supplier) () -> new ParameterizedMessage("onFreeContext listener [{}] failed", listener), e); } } } @@ -208,7 +210,7 @@ public interface SearchOperationListener { try { listener.onNewScrollContext(context); } catch (Exception e) { - logger.warn("onNewScrollContext listener [{}] failed", e, listener); + logger.warn((Supplier) () -> new ParameterizedMessage("onNewScrollContext listener [{}] failed", listener), e); } } } @@ -219,7 +221,7 @@ public interface SearchOperationListener { try { listener.onFreeScrollContext(context); } catch (Exception e) { - logger.warn("onFreeScrollContext listener [{}] failed", e, listener); + logger.warn((Supplier) () -> new ParameterizedMessage("onFreeScrollContext listener [{}] failed", listener), e); } } } diff --git a/core/src/main/java/org/elasticsearch/index/shard/ShardNotFoundException.java b/core/src/main/java/org/elasticsearch/index/shard/ShardNotFoundException.java index fa2c8ce7103..aa46240fd49 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/ShardNotFoundException.java +++ b/core/src/main/java/org/elasticsearch/index/shard/ShardNotFoundException.java @@ -33,10 +33,18 @@ public class ShardNotFoundException extends ResourceNotFoundException { } public ShardNotFoundException(ShardId shardId, Throwable ex) { - super("no such shard", ex); - setShard(shardId); - + this(shardId, "no such shard", ex); } + + public ShardNotFoundException(ShardId shardId, String msg, Object... args) { + this(shardId, msg, null, args); + } + + public ShardNotFoundException(ShardId shardId, String msg, Throwable ex, Object... args) { + super(msg, ex, args); + setShard(shardId); + } + public ShardNotFoundException(StreamInput in) throws IOException{ super(in); } diff --git a/core/src/main/java/org/elasticsearch/index/shard/ShardPath.java b/core/src/main/java/org/elasticsearch/index/shard/ShardPath.java index 154619951f6..23b17c290f1 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/ShardPath.java +++ b/core/src/main/java/org/elasticsearch/index/shard/ShardPath.java @@ -18,9 +18,9 @@ */ package org.elasticsearch.index.shard; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.IOUtils; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.ShardLock; import org.elasticsearch.index.IndexSettings; @@ -108,7 +108,7 @@ public final class ShardPath { * directories with a valid shard state exist the one with the highest version will be used. * Note: this method resolves custom data locations for the shard. 
*/ - public static ShardPath loadShardPath(ESLogger logger, NodeEnvironment env, ShardId shardId, IndexSettings indexSettings) throws IOException { + public static ShardPath loadShardPath(Logger logger, NodeEnvironment env, ShardId shardId, IndexSettings indexSettings) throws IOException { final String indexUUID = indexSettings.getUUID(); final Path[] paths = env.availableShardPaths(shardId); Path loadedPath = null; @@ -146,7 +146,7 @@ public final class ShardPath { * This method tries to delete left-over shards where the index name has been reused but the UUID is different * to allow the new shard to be allocated. */ - public static void deleteLeftoverShardDirectory(ESLogger logger, NodeEnvironment env, ShardLock lock, IndexSettings indexSettings) throws IOException { + public static void deleteLeftoverShardDirectory(Logger logger, NodeEnvironment env, ShardLock lock, IndexSettings indexSettings) throws IOException { final String indexUUID = indexSettings.getUUID(); final Path[] paths = env.availableShardPaths(lock.getShardId()); for (Path path : paths) { diff --git a/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index a986e30f2de..44b4ed933f7 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.shard; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.apache.logging.log4j.Logger; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.NoMergePolicy; @@ -29,11 +30,11 @@ import org.apache.lucene.store.FilterDirectory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; @@ -62,10 +63,10 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; */ final class StoreRecovery { - private final ESLogger logger; + private final Logger logger; private final ShardId shardId; - StoreRecovery(ShardId shardId, ESLogger logger) { + StoreRecovery(ShardId shardId, Logger logger) { this.logger = logger; this.shardId = shardId; } @@ -346,7 +347,7 @@ final class StoreRecovery { recoveryState.getIndex().updateVersion(version); if (recoveryState.getRecoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS) { assert indexShouldExists; - indexShard.skipTranslogRecovery(); + indexShard.skipTranslogRecovery(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP); } else { // since we recover from local, just fill the files and size try { @@ -398,7 +399,7 @@ final class StoreRecovery { } final IndexId indexId = repository.getRepositoryData().resolveIndexId(indexName); repository.restoreShard(indexShard, restoreSource.snapshot().getSnapshotId(), restoreSource.version(), indexId, snapshotShardId, indexShard.recoveryState()); - indexShard.skipTranslogRecovery(); + 
indexShard.skipTranslogRecovery(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP); indexShard.finalizeRecovery(); indexShard.postRecovery("restore done"); } catch (Exception e) { diff --git a/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java b/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java index 78628a02c49..64ae0c77007 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java +++ b/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java @@ -18,10 +18,10 @@ */ package org.elasticsearch.index.shard; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.IgnoreOnRecoveryEngineException; import org.elasticsearch.index.mapper.DocumentMapperForType; @@ -44,11 +44,11 @@ import static org.elasticsearch.index.mapper.SourceToParse.source; */ public class TranslogRecoveryPerformer { private final MapperService mapperService; - private final ESLogger logger; + private final Logger logger; private final Map recoveredTypes = new HashMap<>(); private final ShardId shardId; - protected TranslogRecoveryPerformer(ShardId shardId, MapperService mapperService, ESLogger logger) { + protected TranslogRecoveryPerformer(ShardId shardId, MapperService mapperService, Logger logger) { this.shardId = shardId; this.mapperService = mapperService; this.logger = logger; @@ -147,13 +147,16 @@ public class TranslogRecoveryPerformer { * is encountered. */ private void performRecoveryOperation(Engine engine, Translog.Operation operation, boolean allowMappingUpdates, Engine.Operation.Origin origin) { + try { switch (operation.opType()) { case INDEX: Translog.Index index = (Translog.Index) operation; + // we set canHaveDuplicates to true all the time such that we de-optimize the translog case and ensure that all + // autoGeneratedID docs that are coming from the primary are updated correctly. 
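+ // Illustrative note, not part of this change: the two arguments appended to prepareIndex below are + // the autoGeneratedIdTimestamp carried by the translog entry and canHaveDuplicates == true, so a replayed + // auto-generated-ID append is treated as a potential duplicate and indexed as an update rather than a plain add.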
Engine.Index engineIndex = IndexShard.prepareIndex(docMapper(index.type()), source(shardId.getIndexName(), index.type(), index.id(), index.source()) .routing(index.routing()).parent(index.parent()).timestamp(index.timestamp()).ttl(index.ttl()), - index.version(), index.versionType().versionTypeForReplicationAndRecovery(), origin); + index.version(), index.versionType().versionTypeForReplicationAndRecovery(), origin, index.getAutoGeneratedIdTimestamp(), true); maybeAddMappingUpdate(engineIndex.type(), engineIndex.parsedDoc().dynamicMappingsUpdate(), engineIndex.id(), allowMappingUpdates); if (logger.isTraceEnabled()) { logger.trace("[translog] recover [index] op of [{}][{}]", index.type(), index.id()); diff --git a/core/src/main/java/org/elasticsearch/index/store/IndexStore.java b/core/src/main/java/org/elasticsearch/index/store/IndexStore.java index 9e01d871765..783bd9af58a 100644 --- a/core/src/main/java/org/elasticsearch/index/store/IndexStore.java +++ b/core/src/main/java/org/elasticsearch/index/store/IndexStore.java @@ -26,9 +26,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.ShardPath; -/** - * - */ + public class IndexStore extends AbstractIndexComponent { public static final Setting INDEX_STORE_THROTTLE_TYPE_SETTING = new Setting<>("index.store.throttle.type", "none", IndexRateLimitingType::fromString, diff --git a/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java b/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java index 12558bb9554..ff1f6240700 100644 --- a/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java +++ b/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java @@ -18,8 +18,8 @@ */ package org.elasticsearch.index.store; +import org.apache.logging.log4j.Logger; import org.apache.lucene.store.StoreRateLimiting; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -49,7 +49,7 @@ public class IndexStoreConfig { private volatile StoreRateLimiting.Type rateLimitingType; private volatile ByteSizeValue rateLimitingThrottle; private final StoreRateLimiting rateLimiting = new StoreRateLimiting(); - private final ESLogger logger; + private final Logger logger; public IndexStoreConfig(Settings settings) { logger = Loggers.getLogger(IndexStoreConfig.class, settings); // we don't limit by default (we default to CMS's auto throttle instead): diff --git a/core/src/main/java/org/elasticsearch/index/store/Store.java b/core/src/main/java/org/elasticsearch/index/store/Store.java index ff3d89d9ff9..abcb4a44c80 100644 --- a/core/src/main/java/org/elasticsearch/index/store/Store.java +++ b/core/src/main/java/org/elasticsearch/index/store/Store.java @@ -19,6 +19,9 @@ package org.elasticsearch.index.store; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexCommit; @@ -54,7 +57,6 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import 
org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.store.ByteArrayIndexInput; @@ -217,7 +219,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref * Note that this method requires the caller verify it has the right to access the store and * no concurrent file changes are happening. If in doubt, you probably want to use one of the following: * - * {@link #readMetadataSnapshot(Path, ShardId, NodeEnvironment.ShardLocker, ESLogger)} to read metadata while locking + * {@link #readMetadataSnapshot(Path, ShardId, NodeEnvironment.ShardLocker, Logger)} to read metadata while locking * {@link IndexShard#snapshotStoreMetadata()} to safely read from an existing shard * {@link IndexShard#acquireIndexCommit(boolean)} to get an {@link IndexCommit} which is safe to use but has to be freed * @@ -279,7 +281,7 @@ directory.deleteFile(origFile); } catch (FileNotFoundException | NoSuchFileException e) { } catch (Exception ex) { - logger.debug("failed to delete file [{}]", ex, origFile); + logger.debug((Supplier) () -> new ParameterizedMessage("failed to delete file [{}]", origFile), ex); } // now, rename the files... and fail if it won't work directory.rename(tempFile, origFile); @@ -376,7 +378,7 @@ * @throws IOException if the index we try to read is corrupted */ public static MetadataSnapshot readMetadataSnapshot(Path indexLocation, ShardId shardId, NodeEnvironment.ShardLocker shardLocker, - ESLogger logger) throws IOException { + Logger logger) throws IOException { try (ShardLock lock = shardLocker.lock(shardId, TimeUnit.SECONDS.toMillis(5)); Directory dir = new SimpleFSDirectory(indexLocation)) { failIfCorrupted(dir, shardId); @@ -396,11 +398,11 @@ * can be successfully opened. This includes reading the segment infos and possible * corruption markers. */ - public static boolean canOpenIndex(ESLogger logger, Path indexLocation, ShardId shardId, NodeEnvironment.ShardLocker shardLocker) throws IOException { + public static boolean canOpenIndex(Logger logger, Path indexLocation, ShardId shardId, NodeEnvironment.ShardLocker shardLocker) throws IOException { try { tryOpenIndex(indexLocation, shardId, shardLocker, logger); } catch (Exception ex) { - logger.trace("Can't open index for path [{}]", ex, indexLocation); + logger.trace((Supplier) () -> new ParameterizedMessage("Can't open index for path [{}]", indexLocation), ex); return false; } return true; @@ -411,7 +413,7 @@ * segment infos and possible corruption markers. 
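* <p>Usage sketch (illustrative, not part of this change): {@code Store.tryOpenIndex(indexLocation, shardId, shardLocker, logger);} — {@code canOpenIndex} above simply wraps this call and returns {@code false} instead of propagating the exception.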
If the index can not * be opened, an exception is thrown */ - public static void tryOpenIndex(Path indexLocation, ShardId shardId, NodeEnvironment.ShardLocker shardLocker, ESLogger logger) throws IOException { + public static void tryOpenIndex(Path indexLocation, ShardId shardId, NodeEnvironment.ShardLocker shardLocker, Logger logger) throws IOException { try (ShardLock lock = shardLocker.lock(shardId, TimeUnit.SECONDS.toMillis(5)); Directory dir = new SimpleFSDirectory(indexLocation)) { failIfCorrupted(dir, shardId); @@ -605,7 +607,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref // if one of those files can't be deleted we better fail the cleanup otherwise we might leave an old commit point around? throw new IllegalStateException("Can't delete " + existingFile + " - cleanup failed", ex); } - logger.debug("failed to delete file [{}]", ex, existingFile); + logger.debug((Supplier) () -> new ParameterizedMessage("failed to delete file [{}]", existingFile), ex); // ignore, we don't really care, will get deleted later on } } @@ -652,9 +654,9 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref static final class StoreDirectory extends FilterDirectory { - private final ESLogger deletesLogger; + private final Logger deletesLogger; - StoreDirectory(Directory delegateDirectory, ESLogger deletesLogger) throws IOException { + StoreDirectory(Directory delegateDirectory, Logger deletesLogger) throws IOException { super(delegateDirectory); this.deletesLogger = deletesLogger; } @@ -717,7 +719,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref numDocs = 0; } - MetadataSnapshot(IndexCommit commit, Directory directory, ESLogger logger) throws IOException { + MetadataSnapshot(IndexCommit commit, Directory directory, Logger logger) throws IOException { LoadedMetadata loadedMetadata = loadMetadata(commit, directory, logger); metadata = loadedMetadata.fileMetadata; commitUserData = loadedMetadata.userData; @@ -780,7 +782,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref } } - static LoadedMetadata loadMetadata(IndexCommit commit, Directory directory, ESLogger logger) throws IOException { + static LoadedMetadata loadMetadata(IndexCommit commit, Directory directory, Logger logger) throws IOException { long numDocs; Map builder = new HashMap<>(); Map commitUserDataBuilder = new HashMap<>(); @@ -823,8 +825,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref // Lucene checks the checksum after it tries to lookup the codec etc. // in that case we might get only IAE or similar exceptions while we are really corrupt... // TODO we should check the checksum in lucene if we hit an exception - logger.warn("failed to build store metadata. checking segment info integrity (with commit [{}])", - ex, commit == null ? "no" : "yes"); + logger.warn((Supplier) () -> new ParameterizedMessage("failed to build store metadata. checking segment info integrity (with commit [{}])", commit == null ? 
"no" : "yes"), ex); Lucene.checkSegmentInfoIntegrity(directory); } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException cex) { cex.addSuppressed(ex); @@ -839,7 +840,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref } private static void checksumFromLuceneFile(Directory directory, String file, Map builder, - ESLogger logger, Version version, boolean readFileAsHash) throws IOException { + Logger logger, Version version, boolean readFileAsHash) throws IOException { final String checksum; final BytesRefBuilder fileHash = new BytesRefBuilder(); try (final IndexInput in = directory.openInput(file, IOContext.READONCE)) { @@ -859,7 +860,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref } } catch (Exception ex) { - logger.debug("Can retrieve checksum from file [{}]", ex, file); + logger.debug((Supplier) () -> new ParameterizedMessage("Can retrieve checksum from file [{}]", file), ex); throw ex; } builder.put(file, new StoreFileMetaData(file, length, checksum, version, fileHash.get())); diff --git a/core/src/main/java/org/elasticsearch/index/translog/Translog.java b/core/src/main/java/org/elasticsearch/index/translog/Translog.java index 68a1dd1aa36..0082b7a0336 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/core/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -19,11 +19,14 @@ package org.elasticsearch.index.translog; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.Term; import org.apache.lucene.index.TwoPhaseCommit; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -257,7 +260,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC try { Files.delete(tempFile); } catch (IOException ex) { - logger.warn("failed to delete temp file {}", ex, tempFile); + logger.warn((Supplier) () -> new ParameterizedMessage("failed to delete temp file {}", tempFile), ex); } } } @@ -828,8 +831,11 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC } public static class Index implements Operation { - public static final int SERIALIZATION_FORMAT = 6; // since 2.0-beta1 and 1.1 + public static final int FORMAT_2x = 6; // since 2.0-beta1 and 1.1 + public static final int FORMAT_AUTO_GENERATED_IDS = 7; // since 5.0.0-beta1 + public static final int SERIALIZATION_FORMAT = FORMAT_AUTO_GENERATED_IDS; private final String id; + private final long autoGeneratedIdTimestamp; private final String type; private final long version; private final VersionType versionType; @@ -841,7 +847,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC public Index(StreamInput in) throws IOException { final int format = in.readVInt(); // SERIALIZATION_FORMAT - assert format == SERIALIZATION_FORMAT : "format was: " + format; + assert format >= FORMAT_2x : "format was: " + format; id = in.readString(); type = in.readString(); source = in.readBytesReference(); @@ -852,6 +858,11 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC this.ttl = in.readLong(); this.versionType = 
VersionType.fromValue(in.readByte()); assert versionType.validateVersionForWrites(this.version); + if (format >= FORMAT_AUTO_GENERATED_IDS) { + this.autoGeneratedIdTimestamp = in.readLong(); + } else { + this.autoGeneratedIdTimestamp = IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP; + } } public Index(Engine.Index index) { @@ -864,6 +875,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC this.timestamp = index.timestamp(); this.ttl = index.ttl(); this.versionType = index.versionType(); + this.autoGeneratedIdTimestamp = index.getAutoGeneratedIdTimestamp(); } public Index(String type, String id, byte[] source) { @@ -876,6 +888,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC parent = null; timestamp = 0; ttl = 0; + autoGeneratedIdTimestamp = -1; } @Override @@ -941,6 +954,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC out.writeLong(timestamp); out.writeLong(ttl); out.writeByte(versionType.getValue()); + out.writeLong(autoGeneratedIdTimestamp); } @Override @@ -960,6 +974,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC id.equals(index.id) == false || type.equals(index.type) == false || versionType != index.versionType || + autoGeneratedIdTimestamp != index.autoGeneratedIdTimestamp || source.equals(index.source) == false) { return false; } @@ -980,6 +995,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC result = 31 * result + (routing != null ? routing.hashCode() : 0); result = 31 * result + (parent != null ? parent.hashCode() : 0); result = 31 * result + Long.hashCode(timestamp); + result = 31 * result + Long.hashCode(autoGeneratedIdTimestamp); result = 31 * result + Long.hashCode(ttl); return result; } @@ -991,6 +1007,10 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC ", type='" + type + '\'' + '}'; } + + public long getAutoGeneratedIdTimestamp() { + return autoGeneratedIdTimestamp; + } } public static class Delete implements Operation { diff --git a/core/src/main/java/org/elasticsearch/index/translog/TranslogToolCli.java b/core/src/main/java/org/elasticsearch/index/translog/TranslogToolCli.java index eaf50f25a01..d565074a50c 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/TranslogToolCli.java +++ b/core/src/main/java/org/elasticsearch/index/translog/TranslogToolCli.java @@ -24,7 +24,6 @@ import org.elasticsearch.cli.Terminal; import org.elasticsearch.common.logging.LogConfigurator; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; -import org.elasticsearch.index.translog.TruncateTranslogCommand; import org.elasticsearch.node.internal.InternalSettingsPreparer; /** @@ -32,24 +31,22 @@ import org.elasticsearch.node.internal.InternalSettingsPreparer; */ public class TranslogToolCli extends MultiCommand { - public TranslogToolCli() { + private TranslogToolCli() { super("A CLI tool for various Elasticsearch translog actions"); subcommands.put("truncate", new TruncateTranslogCommand()); } public static void main(String[] args) throws Exception { - // initialize default for es.logger.level because we will not read the logging.yml + // initialize default for es.logger.level because we will not read the log4j2.properties String loggerLevel = System.getProperty("es.logger.level", "INFO"); String pathHome = System.getProperty("es.path.home"); // Set the appender for all potential log files to terminal so that other components that 
use the logger print out to the // same terminal. Environment loggingEnvironment = InternalSettingsPreparer.prepareEnvironment(Settings.builder() .put("path.home", pathHome) - .put("appender.terminal.type", "terminal") - .put("rootLogger", "${logger.level}, terminal") .put("logger.level", loggerLevel) .build(), Terminal.DEFAULT); - LogConfigurator.configure(loggingEnvironment.settings(), false); + LogConfigurator.configure(loggingEnvironment, false); exit(new TranslogToolCli().main(args, Terminal.DEFAULT)); } diff --git a/core/src/main/java/org/elasticsearch/index/warmer/ShardIndexWarmerService.java b/core/src/main/java/org/elasticsearch/index/warmer/ShardIndexWarmerService.java index 57decb25f56..d10a951937e 100644 --- a/core/src/main/java/org/elasticsearch/index/warmer/ShardIndexWarmerService.java +++ b/core/src/main/java/org/elasticsearch/index/warmer/ShardIndexWarmerService.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.warmer; -import org.elasticsearch.common.logging.ESLogger; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.index.IndexSettings; @@ -28,8 +28,6 @@ import org.elasticsearch.index.shard.ShardId; import java.util.concurrent.TimeUnit; -/** - */ public class ShardIndexWarmerService extends AbstractIndexShardComponent { private final CounterMetric current = new CounterMetric(); @@ -39,7 +37,7 @@ super(shardId, indexSettings); } - public ESLogger logger() { + public Logger logger() { return this.logger; } diff --git a/core/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java b/core/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java index 2e82e819591..d28a8da73dc 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java +++ b/core/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java @@ -19,6 +19,8 @@ package org.elasticsearch.indices; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -177,7 +179,7 @@ @Override public void onFailure(Exception e) { - logger.warn("failed to write indexing buffer for shard [{}]; ignoring", e, shard.shardId()); + logger.warn((Supplier) () -> new ParameterizedMessage("failed to write indexing buffer for shard [{}]; ignoring", shard.shardId()), e); } }); } diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesService.java b/core/src/main/java/org/elasticsearch/indices/IndicesService.java index b951eaefaa1..abc9873efaf 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -20,6 +20,9 @@ package org.elasticsearch.indices; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.util.CollectionUtil; @@ -49,7 +52,6 @@ import 
org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; @@ -97,8 +99,8 @@ import org.elasticsearch.indices.cluster.IndicesClusterStateService; import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; import org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.indices.query.IndicesQueriesRegistry; -import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; +import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.search.internal.SearchContext; @@ -218,7 +220,7 @@ public class IndicesService extends AbstractLifecycleComponent try { removeIndex(index, "shutdown", false); } catch (Exception e) { - logger.warn("failed to remove index on stop [{}]", e, index); + logger.warn((Supplier) () -> new ParameterizedMessage("failed to remove index on stop [{}]", index), e); } finally { latch.countDown(); } @@ -296,7 +298,7 @@ } } catch (IllegalIndexShardStateException e) { // we can safely ignore illegal state on ones that are closing for example - logger.trace("{} ignoring shard stats", e, indexShard.shardId()); + logger.trace((Supplier) () -> new ParameterizedMessage("{} ignoring shard stats", indexShard.shardId()), e); } } } @@ -474,7 +476,7 @@ try { removeIndex(index, reason, false); } catch (Exception e) { - logger.warn("failed to remove index ({})", e, reason); + logger.warn((Supplier) () -> new ParameterizedMessage("failed to remove index ({})", reason), e); } } @@ -565,7 +567,7 @@ try { removeIndex(index, reason, true); } catch (Exception e) { - logger.warn("failed to delete index ({})", e, reason); + logger.warn((Supplier) () -> new ParameterizedMessage("failed to delete index ({})", reason), e); } } @@ -585,7 +587,7 @@ } deleteIndexStore(reason, metaData, clusterState); } catch (IOException e) { - logger.warn("[{}] failed to delete unassigned index (reason [{}])", e, metaData.getIndex(), reason); + logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to delete unassigned index (reason [{}])", metaData.getIndex(), reason), e); } } } @@ -637,9 +639,9 @@ } success = true; } catch (LockObtainFailedException ex) { - logger.debug("{} failed to delete index store - at least one shard is still locked", ex, index); + logger.debug((Supplier) () -> new ParameterizedMessage("{} failed to delete index store - at least one shard is still locked", index), ex); } catch (Exception ex) { - logger.warn("{} failed to delete index", ex, index); + logger.warn((Supplier) () -> new ParameterizedMessage("{} failed to delete index", index), ex); } finally { if (success == false) { addPendingDelete(index, indexSettings); @@ -746,7 +748,7 @@ public class IndicesService 
extends AbstractLifecycleComponent try { metaData = metaStateService.loadIndexState(index); } catch (IOException e) { - logger.warn("[{}] failed to load state file from a stale deleted index, folders will be left on disk", e, index); + logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to load state file from a stale deleted index, folders will be left on disk", index), e); return null; } final IndexSettings indexSettings = buildIndexSettings(metaData); @@ -755,7 +757,7 @@ public class IndicesService extends AbstractLifecycleComponent } catch (IOException e) { // we just warn about the exception here because if deleteIndexStoreIfDeletionAllowed // throws an exception, it gets added to the list of pending deletes to be tried again - logger.warn("[{}] failed to delete index on disk", e, metaData.getIndex()); + logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to delete index on disk", metaData.getIndex()), e); } return metaData; } @@ -927,7 +929,7 @@ public class IndicesService extends AbstractLifecycleComponent nodeEnv.deleteIndexDirectoryUnderLock(index, indexSettings); iterator.remove(); } catch (IOException ex) { - logger.debug("{} retry pending delete", ex, index); + logger.debug((Supplier) () -> new ParameterizedMessage("{} retry pending delete", index), ex); } } else { assert delete.shardId != -1; @@ -937,7 +939,7 @@ public class IndicesService extends AbstractLifecycleComponent deleteShardStore("pending delete", shardLock, delete.settings); iterator.remove(); } catch (IOException ex) { - logger.debug("{} retry pending delete", ex, shardLock.getShardId()); + logger.debug((Supplier) () -> new ParameterizedMessage("{} retry pending delete", shardLock.getShardId()), ex); } } else { logger.warn("{} no shard lock for pending delete", delete.shardId); @@ -1000,13 +1002,13 @@ public class IndicesService extends AbstractLifecycleComponent private static final class CacheCleaner implements Runnable, Releasable { private final IndicesFieldDataCache cache; - private final ESLogger logger; + private final Logger logger; private final ThreadPool threadPool; private final TimeValue interval; private final AtomicBoolean closed = new AtomicBoolean(false); private final IndicesRequestCache requestCache; - public CacheCleaner(IndicesFieldDataCache cache, IndicesRequestCache requestCache, ESLogger logger, ThreadPool threadPool, TimeValue interval) { + public CacheCleaner(IndicesFieldDataCache cache, IndicesRequestCache requestCache, Logger logger, ThreadPool threadPool, TimeValue interval) { this.cache = cache; this.requestCache = requestCache; this.logger = logger; diff --git a/core/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java b/core/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java index f3812f6900e..a324e8282a1 100644 --- a/core/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java +++ b/core/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.indices.analysis; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.analysis.hunspell.Dictionary; import org.apache.lucene.store.Directory; import org.apache.lucene.store.SimpleFSDirectory; @@ -138,7 +140,9 @@ public class HunspellService extends AbstractComponent { } catch (Exception e) { // The cache loader throws unchecked exception (see #loadDictionary()), // here we simply report the exception and continue loading 
the dictionaries - logger.error("exception while loading dictionary {}", e, file.getFileName()); + logger.error( + (Supplier) () -> new ParameterizedMessage( + "exception while loading dictionary {}", file.getFileName()), e); } } } @@ -196,7 +200,7 @@ } } catch (Exception e) { - logger.error("Could not load hunspell dictionary [{}]", e, locale); + logger.error((Supplier) () -> new ParameterizedMessage("Could not load hunspell dictionary [{}]", locale), e); throw e; } finally { IOUtils.close(affixStream); diff --git a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index eb1d9f07dc2..e766f6ecefb 100644 --- a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -19,6 +19,9 @@ package org.elasticsearch.indices.cluster; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.store.LockObtainFailedException; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; @@ -37,7 +40,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -61,10 +63,10 @@ import org.elasticsearch.index.shard.ShardNotFoundException; import org.elasticsearch.indices.IndexAlreadyExistsException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.flush.SyncedFlushService; -import org.elasticsearch.indices.recovery.RecoveryFailedException; import org.elasticsearch.indices.recovery.PeerRecoverySourceService; -import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; +import org.elasticsearch.indices.recovery.RecoveryFailedException; +import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.search.SearchService; import org.elasticsearch.snapshots.RestoreService; @@ -269,7 +271,8 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple threadPool.generic().execute(new AbstractRunnable() { @Override public void onFailure(Exception e) { - logger.warn("[{}] failed to complete pending deletion for index", e, index); + logger.warn( + (Supplier) () -> new ParameterizedMessage("[{}] failed to complete pending deletion for index", index), e); } @Override @@ -559,7 +562,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple * routing to *require* peer recovery, use {@link ShardRouting#recoverySource()} to * check if it's needed or not. 
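* <p>Illustrative guard (a sketch, not part of this change; assumes the PEER recovery source type and local variable names): {@code if (shardRouting.recoverySource().getType() == RecoverySource.Type.PEER) { sourceNode = findSourceNodeForPeerRecovery(logger, routingTable, nodes, shardRouting); } }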
*/ - private static DiscoveryNode findSourceNodeForPeerRecovery(ESLogger logger, RoutingTable routingTable, DiscoveryNodes nodes, + private static DiscoveryNode findSourceNodeForPeerRecovery(Logger logger, RoutingTable routingTable, DiscoveryNodes nodes, ShardRouting shardRouting) { DiscoveryNode sourceNode = null; if (!shardRouting.primary()) { @@ -637,11 +640,12 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple } catch (Exception inner) { inner.addSuppressed(failure); logger.warn( - "[{}][{}] failed to remove shard after failure ([{}])", - inner, - shardRouting.getIndexName(), - shardRouting.getId(), - message); + (Supplier) () -> new ParameterizedMessage( + "[{}][{}] failed to remove shard after failure ([{}])", + shardRouting.getIndexName(), + shardRouting.getId(), + message), + inner); } if (sendShardFailure) { sendFailShard(shardRouting, message, failure); @@ -650,17 +654,20 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple private void sendFailShard(ShardRouting shardRouting, String message, @Nullable Exception failure) { try { - logger.warn("[{}] marking and sending shard failed due to [{}]", failure, shardRouting.shardId(), message); + logger.warn( + (Supplier) () -> new ParameterizedMessage( + "[{}] marking and sending shard failed due to [{}]", shardRouting.shardId(), message), failure); failedShardsCache.put(shardRouting.shardId(), shardRouting); shardStateAction.localShardFailed(shardRouting, message, failure, SHARD_STATE_ACTION_LISTENER); } catch (Exception inner) { if (failure != null) inner.addSuppressed(failure); logger.warn( + (Supplier) () -> new ParameterizedMessage( "[{}][{}] failed to mark shard as failed (because of [{}])", - inner, shardRouting.getIndexName(), shardRouting.getId(), - message); + message), + inner); } } diff --git a/core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java b/core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java index 3ab18dd1bd0..81e9f3fac5f 100644 --- a/core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java +++ b/core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java @@ -19,6 +19,7 @@ package org.elasticsearch.indices.fielddata.cache; +import org.apache.logging.log4j.Logger; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; @@ -31,7 +32,6 @@ import org.elasticsearch.common.cache.RemovalListener; import org.elasticsearch.common.cache.RemovalNotification; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -108,13 +108,13 @@ public class IndicesFieldDataCache extends AbstractComponent implements RemovalL * A specific cache instance for the relevant parameters of it (index, fieldNames, fieldType). 
*/ static class IndexFieldCache implements IndexFieldDataCache, SegmentReader.CoreClosedListener, IndexReader.ReaderClosedListener { - private final ESLogger logger; + private final Logger logger; final Index index; final String fieldName; private final Cache cache; private final Listener[] listeners; - IndexFieldCache(ESLogger logger,final Cache cache, Index index, String fieldName, Listener... listeners) { + IndexFieldCache(Logger logger,final Cache cache, Index index, String fieldName, Listener... listeners) { this.logger = logger; this.listeners = listeners; this.index = index; diff --git a/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java index 236b3768714..273682db312 100644 --- a/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java +++ b/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.indices.flush; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.flush.FlushRequest; @@ -50,12 +52,12 @@ import org.elasticsearch.index.shard.ShardNotFoundException; import org.elasticsearch.indices.IndexClosedException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; import java.io.IOException; @@ -100,7 +102,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL @Override public void onFailure(Exception e) { - logger.debug("{} sync flush on inactive shard failed", e, indexShard.shardId()); + logger.debug((Supplier) () -> new ParameterizedMessage("{} sync flush on inactive shard failed", indexShard.shardId()), e); } }); } @@ -335,7 +337,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL @Override public void handleException(TransportException exp) { - logger.trace("{} error while performing synced flush on [{}], skipping", exp, shardId, shard); + logger.trace((Supplier) () -> new ParameterizedMessage("{} error while performing synced flush on [{}], skipping", shardId, shard), exp); results.put(shard, new ShardSyncedFlushResponse(exp.getMessage())); contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); } @@ -391,7 +393,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL @Override public void handleException(TransportException exp) { - logger.trace("{} error while performing pre synced flush on [{}], skipping", exp, shardId, shard); + logger.trace((Supplier) () -> new ParameterizedMessage("{} error while performing pre synced flush on [{}], skipping", shardId, shard), exp); if (countDown.countDown()) { listener.onResponse(commitIds); } diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java 
b/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index 37fe07e16a8..f26d0787f41 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -19,6 +19,8 @@ package org.elasticsearch.indices.recovery; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.RateLimiter; import org.elasticsearch.ElasticsearchException; @@ -141,7 +143,9 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde protected void retryRecovery(final RecoveryTarget recoveryTarget, final Throwable reason, TimeValue retryAfter, final StartRecoveryRequest currentRequest) { - logger.trace("will retry recovery with id [{}] in [{}]", reason, recoveryTarget.recoveryId(), retryAfter); + logger.trace( + (Supplier) () -> new ParameterizedMessage( + "will retry recovery with id [{}] in [{}]", recoveryTarget.recoveryId(), retryAfter), reason); retryRecovery(recoveryTarget, retryAfter, currentRequest); } @@ -233,7 +237,12 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde logger.trace("recovery cancelled", e); } catch (Exception e) { if (logger.isTraceEnabled()) { - logger.trace("[{}][{}] Got exception on recovery", e, request.shardId().getIndex().getName(), request.shardId().id()); + logger.trace( + (Supplier) () -> new ParameterizedMessage( + "[{}][{}] Got exception on recovery", + request.shardId().getIndex().getName(), + request.shardId().id()), + e); } Throwable cause = ExceptionsHelper.unwrapCause(e); if (cause instanceof CancellableThreads.ExecutionCancelledException) { @@ -295,7 +304,7 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde public void messageReceived(RecoveryPrepareForTranslogOperationsRequest request, TransportChannel channel) throws Exception { try (RecoveriesCollection.RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId() )) { - recoveryRef.status().prepareForTranslogOperations(request.totalTranslogOps()); + recoveryRef.status().prepareForTranslogOperations(request.totalTranslogOps(), request.getMaxUnsafeAutoIdTimestamp()); } channel.sendResponse(TransportResponse.Empty.INSTANCE); } @@ -345,8 +354,11 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde // which causes local mapping changes since the mapping (clusterstate) might not have arrived on this node. // we want to wait until these mappings are processed but also need to do some maintenance and roll back the // number of processed (completed) operations in this batch to ensure accounting is correct. 
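// Clarifying note, not part of this change: the rollback below is translog.decrementRecoveredOperations(exception.completedOperations()), // which subtracts the operations already counted for this batch so the retry does not double-count recovered ops.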
- logger.trace("delaying recovery due to missing mapping changes (rolling back stats for [{}] ops)", exception, exception - .completedOperations()); + logger.trace( + (Supplier) () -> new ParameterizedMessage( + "delaying recovery due to missing mapping changes (rolling back stats for [{}] ops)", + exception.completedOperations()), + exception); final RecoveryState.Translog translog = recoveryTarget.state().getTranslog(); translog.decrementRecoveredOperations(exception.completedOperations()); // do the maintainance and rollback competed ops // we do not need to use a timeout here since the entire recovery mechanism has an inactivity protection (it will be @@ -425,8 +437,12 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde logger.trace("successfully waited for cluster state with version {} (current: {})", clusterStateVersion, observer.observedState().getVersion()); } catch (Exception e) { - logger.debug("failed waiting for cluster state with version {} (current: {})", e, clusterStateVersion, - observer.observedState()); + logger.debug( + (Supplier) () -> new ParameterizedMessage( + "failed waiting for cluster state with version {} (current: {})", + clusterStateVersion, + observer.observedState()), + e); throw ExceptionsHelper.convertToRuntime(e); } } @@ -504,13 +520,17 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde public void onFailure(Exception e) { try (RecoveriesCollection.RecoveryRef recoveryRef = onGoingRecoveries.getRecovery(recoveryId)) { if (recoveryRef != null) { - logger.error("unexpected error during recovery [{}], failing shard", e, recoveryId); + logger.error( + (Supplier) () -> new ParameterizedMessage( + "unexpected error during recovery [{}], failing shard", recoveryId), e); onGoingRecoveries.failRecovery(recoveryId, new RecoveryFailedException(recoveryRef.status().state(), "unexpected error", e), true // be safe ); } else { - logger.debug("unexpected error during recovery, but recovery id [{}] is finished", e, recoveryId); + logger.debug( + (Supplier) () -> new ParameterizedMessage( + "unexpected error during recovery, but recovery id [{}] is finished", recoveryId), e); } } } diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverFilesRecoveryException.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverFilesRecoveryException.java index 69a55e03c9a..d2e07bd9e4c 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverFilesRecoveryException.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverFilesRecoveryException.java @@ -57,7 +57,7 @@ public class RecoverFilesRecoveryException extends ElasticsearchException implem public RecoverFilesRecoveryException(StreamInput in) throws IOException{ super(in); numberOfFiles = in.readInt(); - totalFilesSize = ByteSizeValue.readBytesSizeValue(in); + totalFilesSize = new ByteSizeValue(in); } @Override diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java index 65da26eb676..65a48b18e22 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java @@ -19,9 +19,11 @@ package org.elasticsearch.indices.recovery; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import 
org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.Callback; import org.elasticsearch.common.util.concurrent.AbstractRunnable; @@ -46,11 +48,11 @@ public class RecoveriesCollection { /** This is the single source of truth for ongoing recoveries. If it's not here, it was canceled or done */ private final ConcurrentMap onGoingRecoveries = ConcurrentCollections.newConcurrentMap(); - private final ESLogger logger; + private final Logger logger; private final ThreadPool threadPool; private final Callback ensureClusterStateVersionCallback; - public RecoveriesCollection(ESLogger logger, ThreadPool threadPool, Callback ensureClusterStateVersionCallback) { + public RecoveriesCollection(Logger logger, ThreadPool threadPool, Callback ensureClusterStateVersionCallback) { this.logger = logger; this.threadPool = threadPool; this.ensureClusterStateVersionCallback = ensureClusterStateVersionCallback; @@ -222,7 +224,7 @@ public class RecoveriesCollection { @Override public void onFailure(Exception e) { - logger.error("unexpected error while monitoring recovery [{}]", e, recoveryId); + logger.error((Supplier) () -> new ParameterizedMessage("unexpected error while monitoring recovery [{}]", recoveryId), e); } @Override diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java index 3d5d7052c9d..171102d07ea 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java @@ -19,6 +19,7 @@ package org.elasticsearch.indices.recovery; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.shard.ShardId; @@ -31,6 +32,7 @@ import java.io.IOException; */ public class RecoveryPrepareForTranslogOperationsRequest extends TransportRequest { + private long maxUnsafeAutoIdTimestamp = IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP; private long recoveryId; private ShardId shardId; private int totalTranslogOps = RecoveryState.Translog.UNKNOWN; @@ -38,10 +40,11 @@ public class RecoveryPrepareForTranslogOperationsRequest extends TransportReques public RecoveryPrepareForTranslogOperationsRequest() { } - RecoveryPrepareForTranslogOperationsRequest(long recoveryId, ShardId shardId, int totalTranslogOps) { + RecoveryPrepareForTranslogOperationsRequest(long recoveryId, ShardId shardId, int totalTranslogOps, long maxUnsafeAutoIdTimestamp) { this.recoveryId = recoveryId; this.shardId = shardId; this.totalTranslogOps = totalTranslogOps; + this.maxUnsafeAutoIdTimestamp = maxUnsafeAutoIdTimestamp; } public long recoveryId() { @@ -56,12 +59,17 @@ public class RecoveryPrepareForTranslogOperationsRequest extends TransportReques return totalTranslogOps; } + public long getMaxUnsafeAutoIdTimestamp() { + return maxUnsafeAutoIdTimestamp; + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); recoveryId = in.readLong(); shardId = ShardId.readShardId(in); totalTranslogOps = in.readVInt(); + maxUnsafeAutoIdTimestamp = in.readLong(); } @Override @@ -70,5 +78,6 @@ public 
class RecoveryPrepareForTranslogOperationsRequest extends TransportReques out.writeLong(recoveryId); shardId.writeTo(out); out.writeVInt(totalTranslogOps); + out.writeLong(maxUnsafeAutoIdTimestamp); } } diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index f78929e516f..790376ba78e 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -19,6 +19,8 @@ package org.elasticsearch.indices.recovery; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexFormatTooNewException; @@ -30,13 +32,11 @@ import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.StopWatch; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.lucene.store.InputStreamIndexInput; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.CancellableThreads; @@ -72,7 +72,7 @@ import java.util.stream.StreamSupport; */ public class RecoverySourceHandler { - protected final ESLogger logger; + protected final Logger logger; // Shard that is going to be recovered (the "source") private final IndexShard shard; private final String indexName; @@ -107,7 +107,7 @@ public class RecoverySourceHandler { final Supplier currentClusterStateVersionSupplier, Function delayNewRecoveries, final int fileChunkSizeInBytes, - final ESLogger logger) { + final Logger logger) { this.shard = shard; this.recoveryTarget = recoveryTarget; this.request = request; @@ -314,8 +314,12 @@ public class RecoverySourceHandler { RemoteTransportException exception = new RemoteTransportException("File corruption occurred on recovery but " + "checksums are ok", null); exception.addSuppressed(targetException); - logger.warn("{} Remote file corruption during finalization of recovery on node {}. local checksum OK", - corruptIndexException, shard.shardId(), request.targetNode()); + logger.warn( + (org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( + "{} Remote file corruption during finalization of recovery on node {}. local checksum OK", + shard.shardId(), + request.targetNode()), + corruptIndexException); throw exception; } else { throw targetException; @@ -342,7 +346,8 @@ public class RecoverySourceHandler { // Send a request preparing the new shard's translog to receive // operations. This ensures the shard engine is started and disables // garbage collection (not the JVM's GC!) 
of tombstone deletes - cancellableThreads.executeIO(() -> recoveryTarget.prepareForTranslogOperations(totalTranslogOps)); + cancellableThreads.executeIO(() -> recoveryTarget.prepareForTranslogOperations(totalTranslogOps, + shard.segmentStats(false).getMaxUnsafeAutoIdTimestamp())); stopWatch.stop(); response.startTime = stopWatch.totalTime().millis() - startEngineStart; @@ -557,8 +562,13 @@ public class RecoverySourceHandler { RemoteTransportException exception = new RemoteTransportException("File corruption occurred on recovery but " + "checksums are ok", null); exception.addSuppressed(e); - logger.warn("{} Remote file corruption on node {}, recovering {}. local checksum OK", - corruptIndexException, shardId, request.targetNode(), md); + logger.warn( + (org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( + "{} Remote file corruption on node {}, recovering {}. local checksum OK", + shardId, + request.targetNode(), + md), + corruptIndexException); throw exception; } } else { diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index 2cb1d89c150..d608dc50e23 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -19,6 +19,9 @@ package org.elasticsearch.indices.recovery; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexFormatTooNewException; import org.apache.lucene.index.IndexFormatTooOldException; @@ -31,7 +34,6 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.util.Callback; @@ -62,7 +64,7 @@ import java.util.concurrent.atomic.AtomicLong; */ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTargetHandler { - private final ESLogger logger; + private final Logger logger; private static final AtomicLong idGenerator = new AtomicLong(); @@ -293,7 +295,8 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget try { entry.getValue().close(); } catch (Exception e) { - logger.debug("error while closing recovery output [{}]", e, entry.getValue()); + logger.debug( + (Supplier) () -> new ParameterizedMessage("error while closing recovery output [{}]", entry.getValue()), e); } iterator.remove(); } @@ -324,9 +327,9 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget /*** Implementation of {@link RecoveryTargetHandler } */ @Override - public void prepareForTranslogOperations(int totalTranslogOps) throws IOException { + public void prepareForTranslogOperations(int totalTranslogOps, long maxUnsafeAutoIdTimestamp) throws IOException { state().getTranslog().totalOperations(totalTranslogOps); - indexShard().skipTranslogRecovery(); + indexShard().skipTranslogRecovery(maxUnsafeAutoIdTimestamp); } @Override diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java index 
3d7e4f29c35..18966028792 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java @@ -33,8 +33,10 @@ public interface RecoveryTargetHandler { * Prepares the target to receive translog operations, after all files have been copied * * @param totalTranslogOps total translog operations expected to be sent + * @param maxUnsafeAutoIdTimestamp the max timestamp that is used to de-optimize documents with auto-generated IDs in the engine. + * This is used to ensure we don't add duplicate documents when we assume an append-only case based on auto-generated IDs */ - void prepareForTranslogOperations(int totalTranslogOps) throws IOException; + void prepareForTranslogOperations(int totalTranslogOps, long maxUnsafeAutoIdTimestamp) throws IOException; /** * The finalize request clears unreferenced translog files, refreshes the engine now that diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java index 428c1615880..327eb3b8eca 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java @@ -74,9 +74,9 @@ public class RemoteRecoveryTargetHandler implements RecoveryTargetHandler { } @Override - public void prepareForTranslogOperations(int totalTranslogOps) throws IOException { + public void prepareForTranslogOperations(int totalTranslogOps, long maxUnsafeAutoIdTimestamp) throws IOException { transportService.submitRequest(targetNode, PeerRecoveryTargetService.Actions.PREPARE_TRANSLOG, - new RecoveryPrepareForTranslogOperationsRequest(recoveryId, shardId, totalTranslogOps), + new RecoveryPrepareForTranslogOperationsRequest(recoveryId, shardId, totalTranslogOps, maxUnsafeAutoIdTimestamp), TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(), EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); } diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java index 38656963266..591176f047a 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java @@ -19,8 +19,8 @@ package org.elasticsearch.indices.recovery; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.translog.Translog; @@ -39,7 +39,7 @@ public class SharedFSRecoverySourceHandler extends RecoverySourceHandler { public SharedFSRecoverySourceHandler(IndexShard shard, RecoveryTargetHandler recoveryTarget, StartRecoveryRequest request, Supplier currentClusterStateVersionSupplier, - Function delayNewRecoveries, ESLogger logger) { + Function delayNewRecoveries, Logger logger) { super(shard, recoveryTarget, request, currentClusterStateVersionSupplier, delayNewRecoveries, -1, logger); this.shard = shard; this.request = request; diff --git a/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java b/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java index
bc7e7f59fc0..439806b454b 100644 --- a/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java +++ b/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java @@ -19,6 +19,8 @@ package org.elasticsearch.indices.store; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -228,7 +230,7 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe @Override public void handleException(TransportException exp) { - logger.debug("shards active request failed for {}", exp, shardId); + logger.debug((Supplier) () -> new ParameterizedMessage("shards active request failed for {}", shardId), exp); if (awaitingResponses.decrementAndGet() == 0) { allNodesResponded(); } @@ -266,14 +268,14 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe try { indicesService.deleteShardStore("no longer used", shardId, currentState); } catch (Exception ex) { - logger.debug("{} failed to delete unallocated shard, ignoring", ex, shardId); + logger.debug((Supplier) () -> new ParameterizedMessage("{} failed to delete unallocated shard, ignoring", shardId), ex); } return currentState; } @Override public void onFailure(String source, Exception e) { - logger.error("{} unexpected error during deletion of unallocated shard", e, shardId); + logger.error((Supplier) () -> new ParameterizedMessage("{} unexpected error during deletion of unallocated shard", shardId), e); } }); } @@ -323,9 +325,9 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe try { channel.sendResponse(new ShardActiveResponse(shardActive, clusterService.localNode())); } catch (IOException e) { - logger.error("failed send response for shard active while trying to delete shard {} - shard will probably not be removed", e, request.shardId); + logger.error((Supplier) () -> new ParameterizedMessage("failed send response for shard active while trying to delete shard {} - shard will probably not be removed", request.shardId), e); } catch (EsRejectedExecutionException e) { - logger.error("failed send response for shard active while trying to delete shard {} - shard will probably not be removed", e, request.shardId); + logger.error((Supplier) () -> new ParameterizedMessage("failed send response for shard active while trying to delete shard {} - shard will probably not be removed", request.shardId), e); } } }, new ClusterStateObserver.ValidationPredicate() { diff --git a/core/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java b/core/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java index f0b61556924..4cdbed367c9 100644 --- a/core/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java +++ b/core/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java @@ -19,6 +19,8 @@ package org.elasticsearch.monitor.fs; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.Constants; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.collect.Tuple; @@ -112,7 +114,9 @@ public class FsProbe extends AbstractComponent { } catch (Exception e) { // do not fail Elasticsearch if something unexpected // happens here - logger.debug("unexpected exception processing /proc/diskstats for devices {}", e, devicesNumbers); + logger.debug( + (Supplier) () -> new 
ParameterizedMessage( + "unexpected exception processing /proc/diskstats for devices {}", devicesNumbers), e); return null; } } diff --git a/core/src/main/java/org/elasticsearch/monitor/fs/FsService.java b/core/src/main/java/org/elasticsearch/monitor/fs/FsService.java index 9f38538f269..96467b4d407 100644 --- a/core/src/main/java/org/elasticsearch/monitor/fs/FsService.java +++ b/core/src/main/java/org/elasticsearch/monitor/fs/FsService.java @@ -19,8 +19,8 @@ package org.elasticsearch.monitor.fs; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -55,7 +55,7 @@ public class FsService extends AbstractComponent { return cache.getOrRefresh(); } - private static FsInfo stats(FsProbe probe, FsInfo initialValue, ESLogger logger) { + private static FsInfo stats(FsProbe probe, FsInfo initialValue, Logger logger) { try { return probe.stats(initialValue); } catch (IOException e) { diff --git a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmGcMonitorService.java b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmGcMonitorService.java index a842ba28497..3a19fe5bd00 100644 --- a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmGcMonitorService.java +++ b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmGcMonitorService.java @@ -19,14 +19,13 @@ package org.elasticsearch.monitor.jvm; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.monitor.jvm.JvmStats.GarbageCollector; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool.Cancellable; @@ -36,7 +35,6 @@ import java.util.HashMap; import java.util.Locale; import java.util.Map; import java.util.Objects; -import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import java.util.function.BiFunction; @@ -207,7 +205,7 @@ public class JvmGcMonitorService extends AbstractLifecycleComponent { "[gc][{}][{}][{}] duration [{}], collections [{}]/[{}], total [{}]/[{}], memory [{}]->[{}]/[{}], all_pools {}"; static void logSlowGc( - final ESLogger logger, + final Logger logger, final JvmMonitor.Threshold threshold, final long seq, final JvmMonitor.SlowGcEvent slowGcEvent, @@ -307,7 +305,7 @@ public class JvmGcMonitorService extends AbstractLifecycleComponent { private static final String OVERHEAD_LOG_MESSAGE = "[gc][{}] overhead, spent [{}] collecting in the last [{}]"; static void logGcOverhead( - final ESLogger logger, + final Logger logger, final JvmMonitor.Threshold threshold, final long current, final long elapsed, diff --git a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java index 1619ecee23a..ca0bb4f3e80 100644 --- a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java +++ b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java @@ -19,10 +19,9 @@ package 
org.elasticsearch.monitor.jvm; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -37,14 +36,10 @@ import java.lang.management.PlatformManagedObject; import java.lang.management.RuntimeMXBean; import java.lang.reflect.Method; import java.util.Collections; -import java.util.HashMap; import java.util.List; import java.util.Map; -/** - * - */ -public class JvmInfo implements Streamable, ToXContent { +public class JvmInfo implements Writeable, ToXContent { private static JvmInfo INSTANCE; @@ -61,100 +56,106 @@ public class JvmInfo implements Streamable, ToXContent { } catch (Exception e) { pid = -1; } - JvmInfo info = new JvmInfo(); - info.pid = pid; - info.startTime = runtimeMXBean.getStartTime(); - info.version = System.getProperty("java.version"); - info.vmName = runtimeMXBean.getVmName(); - info.vmVendor = runtimeMXBean.getVmVendor(); - info.vmVersion = runtimeMXBean.getVmVersion(); - info.mem = new Mem(); - info.mem.heapInit = memoryMXBean.getHeapMemoryUsage().getInit() < 0 ? 0 : memoryMXBean.getHeapMemoryUsage().getInit(); - info.mem.heapMax = memoryMXBean.getHeapMemoryUsage().getMax() < 0 ? 0 : memoryMXBean.getHeapMemoryUsage().getMax(); - info.mem.nonHeapInit = memoryMXBean.getNonHeapMemoryUsage().getInit() < 0 ? 0 : memoryMXBean.getNonHeapMemoryUsage().getInit(); - info.mem.nonHeapMax = memoryMXBean.getNonHeapMemoryUsage().getMax() < 0 ? 0 : memoryMXBean.getNonHeapMemoryUsage().getMax(); + + long heapInit = memoryMXBean.getHeapMemoryUsage().getInit() < 0 ? 0 : memoryMXBean.getHeapMemoryUsage().getInit(); + long heapMax = memoryMXBean.getHeapMemoryUsage().getMax() < 0 ? 0 : memoryMXBean.getHeapMemoryUsage().getMax(); + long nonHeapInit = memoryMXBean.getNonHeapMemoryUsage().getInit() < 0 ? 0 : memoryMXBean.getNonHeapMemoryUsage().getInit(); + long nonHeapMax = memoryMXBean.getNonHeapMemoryUsage().getMax() < 0 ? 
0 : memoryMXBean.getNonHeapMemoryUsage().getMax(); + long directMemoryMax = 0; try { Class vmClass = Class.forName("sun.misc.VM"); - info.mem.directMemoryMax = (Long) vmClass.getMethod("maxDirectMemory").invoke(null); + directMemoryMax = (Long) vmClass.getMethod("maxDirectMemory").invoke(null); } catch (Exception t) { // ignore } - info.inputArguments = runtimeMXBean.getInputArguments().toArray(new String[runtimeMXBean.getInputArguments().size()]); + String[] inputArguments = runtimeMXBean.getInputArguments().toArray(new String[runtimeMXBean.getInputArguments().size()]); + Mem mem = new Mem(heapInit, heapMax, nonHeapInit, nonHeapMax, directMemoryMax); + + String bootClassPath; try { - info.bootClassPath = runtimeMXBean.getBootClassPath(); + bootClassPath = runtimeMXBean.getBootClassPath(); } catch (UnsupportedOperationException e) { // oracle java 9 - info.bootClassPath = System.getProperty("sun.boot.class.path"); - if (info.bootClassPath == null) { + bootClassPath = System.getProperty("sun.boot.class.path"); + if (bootClassPath == null) { // something else - info.bootClassPath = ""; + bootClassPath = ""; } } - info.classPath = runtimeMXBean.getClassPath(); - info.systemProperties = Collections.unmodifiableMap(runtimeMXBean.getSystemProperties()); + String classPath = runtimeMXBean.getClassPath(); + Map systemProperties = Collections.unmodifiableMap(runtimeMXBean.getSystemProperties()); List gcMxBeans = ManagementFactory.getGarbageCollectorMXBeans(); - info.gcCollectors = new String[gcMxBeans.size()]; + String[] gcCollectors = new String[gcMxBeans.size()]; for (int i = 0; i < gcMxBeans.size(); i++) { GarbageCollectorMXBean gcMxBean = gcMxBeans.get(i); - info.gcCollectors[i] = gcMxBean.getName(); + gcCollectors[i] = gcMxBean.getName(); } List memoryPoolMXBeans = ManagementFactory.getMemoryPoolMXBeans(); - info.memoryPools = new String[memoryPoolMXBeans.size()]; + String[] memoryPools = new String[memoryPoolMXBeans.size()]; for (int i = 0; i < memoryPoolMXBeans.size(); i++) { MemoryPoolMXBean memoryPoolMXBean = memoryPoolMXBeans.get(i); - info.memoryPools[i] = memoryPoolMXBean.getName(); + memoryPools[i] = memoryPoolMXBean.getName(); } + String onError = null; + String onOutOfMemoryError = null; + String useCompressedOops = "unknown"; + String useG1GC = "unknown"; + long configuredInitialHeapSize = -1; + long configuredMaxHeapSize = -1; try { @SuppressWarnings("unchecked") Class clazz = - (Class)Class.forName("com.sun.management.HotSpotDiagnosticMXBean"); + (Class)Class.forName("com.sun.management.HotSpotDiagnosticMXBean"); Class vmOptionClazz = Class.forName("com.sun.management.VMOption"); PlatformManagedObject hotSpotDiagnosticMXBean = ManagementFactory.getPlatformMXBean(clazz); Method vmOptionMethod = clazz.getMethod("getVMOption", String.class); Method valueMethod = vmOptionClazz.getMethod("getValue"); try { - Object onError = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "OnError"); - info.onError = (String) valueMethod.invoke(onError); + Object onErrorObject = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "OnError"); + onError = (String) valueMethod.invoke(onErrorObject); } catch (Exception ignored) { } try { - Object onOutOfMemoryError = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "OnOutOfMemoryError"); - info.onOutOfMemoryError = (String) valueMethod.invoke(onOutOfMemoryError); + Object onOutOfMemoryErrorObject = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "OnOutOfMemoryError"); + onOutOfMemoryError = (String) valueMethod.invoke(onOutOfMemoryErrorObject); } catch (Exception 
ignored) { } try { - Object useCompressedOopsVmOption = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "UseCompressedOops"); - info.useCompressedOops = (String) valueMethod.invoke(useCompressedOopsVmOption); + Object useCompressedOopsVmOptionObject = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "UseCompressedOops"); + useCompressedOops = (String) valueMethod.invoke(useCompressedOopsVmOptionObject); } catch (Exception ignored) { } try { - Object useG1GCVmOption = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "UseG1GC"); - info.useG1GC = (String) valueMethod.invoke(useG1GCVmOption); + Object useG1GCVmOptionObject = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "UseG1GC"); + useG1GC = (String) valueMethod.invoke(useG1GCVmOptionObject); } catch (Exception ignored) { } try { - Object initialHeapSizeVmOption = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "InitialHeapSize"); - info.configuredInitialHeapSize = Long.parseLong((String) valueMethod.invoke(initialHeapSizeVmOption)); + Object initialHeapSizeVmOptionObject = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "InitialHeapSize"); + configuredInitialHeapSize = Long.parseLong((String) valueMethod.invoke(initialHeapSizeVmOptionObject)); } catch (Exception ignored) { } try { - Object maxHeapSizeVmOption = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "MaxHeapSize"); - info.configuredMaxHeapSize = Long.parseLong((String) valueMethod.invoke(maxHeapSizeVmOption)); + Object maxHeapSizeVmOptionObject = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "MaxHeapSize"); + configuredMaxHeapSize = Long.parseLong((String) valueMethod.invoke(maxHeapSizeVmOptionObject)); } catch (Exception ignored) { } } catch (Exception ignored) { } - INSTANCE = info; + INSTANCE = new JvmInfo(pid, System.getProperty("java.version"), runtimeMXBean.getVmName(), runtimeMXBean.getVmVersion(), + runtimeMXBean.getVmVendor(), runtimeMXBean.getStartTime(), configuredInitialHeapSize, configuredMaxHeapSize, + mem, inputArguments, bootClassPath, classPath, systemProperties, gcCollectors, memoryPools, onError, onOutOfMemoryError, + useCompressedOops, useG1GC); } public static JvmInfo jvmInfo() { @@ -166,40 +167,100 @@ public class JvmInfo implements Streamable, ToXContent { return INSTANCE; } - long pid = -1; + private final long pid; + private final String version; + private final String vmName; + private final String vmVersion; + private final String vmVendor; + private final long startTime; + private final long configuredInitialHeapSize; + private final long configuredMaxHeapSize; + private final Mem mem; + private final String[] inputArguments; + private final String bootClassPath; + private final String classPath; + private final Map systemProperties; + private final String[] gcCollectors; + private final String[] memoryPools; + private final String onError; + private final String onOutOfMemoryError; + private final String useCompressedOops; + private final String useG1GC; - String version = ""; - String vmName = ""; - String vmVersion = ""; - String vmVendor = ""; + private JvmInfo(long pid, String version, String vmName, String vmVersion, String vmVendor, long startTime, + long configuredInitialHeapSize, long configuredMaxHeapSize, Mem mem, String[] inputArguments, String bootClassPath, + String classPath, Map systemProperties, String[] gcCollectors, String[] memoryPools, String onError, + String onOutOfMemoryError, String useCompressedOops, String useG1GC) { + this.pid = pid; + this.version = version; + this.vmName = vmName; + this.vmVersion = vmVersion; + this.vmVendor = 
vmVendor; + this.startTime = startTime; + this.configuredInitialHeapSize = configuredInitialHeapSize; + this.configuredMaxHeapSize = configuredMaxHeapSize; + this.mem = mem; + this.inputArguments = inputArguments; + this.bootClassPath = bootClassPath; + this.classPath = classPath; + this.systemProperties = systemProperties; + this.gcCollectors = gcCollectors; + this.memoryPools = memoryPools; + this.onError = onError; + this.onOutOfMemoryError = onOutOfMemoryError; + this.useCompressedOops = useCompressedOops; + this.useG1GC = useG1GC; + } - long startTime = -1; + public JvmInfo(StreamInput in) throws IOException { + pid = in.readLong(); + version = in.readString(); + vmName = in.readString(); + vmVersion = in.readString(); + vmVendor = in.readString(); + startTime = in.readLong(); + inputArguments = new String[in.readInt()]; + for (int i = 0; i < inputArguments.length; i++) { + inputArguments[i] = in.readString(); + } + bootClassPath = in.readString(); + classPath = in.readString(); + systemProperties = in.readMap(StreamInput::readString, StreamInput::readString); + mem = new Mem(in); + gcCollectors = in.readStringArray(); + memoryPools = in.readStringArray(); + useCompressedOops = in.readString(); + // the following members are only used locally for bootstrap checks, never serialized nor printed out + this.configuredMaxHeapSize = -1; + this.configuredInitialHeapSize = -1; + this.onError = null; + this.onOutOfMemoryError = null; + this.useG1GC = "unknown"; + } - private long configuredInitialHeapSize; - private long configuredMaxHeapSize; - - Mem mem; - - String[] inputArguments; - - String bootClassPath; - - String classPath; - - Map systemProperties; - - String[] gcCollectors = Strings.EMPTY_ARRAY; - String[] memoryPools = Strings.EMPTY_ARRAY; - - private String onError; - - private String onOutOfMemoryError; - - private String useCompressedOops = "unknown"; - - private String useG1GC = "unknown"; - - private JvmInfo() { + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeLong(pid); + out.writeString(version); + out.writeString(vmName); + out.writeString(vmVersion); + out.writeString(vmVendor); + out.writeLong(startTime); + out.writeInt(inputArguments.length); + for (String inputArgument : inputArguments) { + out.writeString(inputArgument); + } + out.writeString(bootClassPath); + out.writeString(classPath); + out.writeVInt(this.systemProperties.size()); + for (Map.Entry entry : systemProperties.entrySet()) { + out.writeString(entry.getKey()); + out.writeString(entry.getValue()); + } + mem.writeTo(out); + out.writeStringArray(gcCollectors); + out.writeStringArray(memoryPools); + out.writeString(useCompressedOops); } /** @@ -354,6 +415,14 @@ public class JvmInfo implements Streamable, ToXContent { return this.useG1GC; } + public String[] getGcCollectors() { + return gcCollectors; + } + + public String[] getMemoryPools() { + return memoryPools; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.JVM); @@ -407,72 +476,37 @@ public class JvmInfo implements Streamable, ToXContent { static final String USING_COMPRESSED_OOPS = "using_compressed_ordinary_object_pointers"; } - public static JvmInfo readJvmInfo(StreamInput in) throws IOException { - JvmInfo jvmInfo = new JvmInfo(); - jvmInfo.readFrom(in); - return jvmInfo; - } + public static class Mem implements Writeable { - @Override - public void readFrom(StreamInput in) throws IOException { - pid = in.readLong(); - version =
in.readString(); - vmName = in.readString(); - vmVersion = in.readString(); - vmVendor = in.readString(); - startTime = in.readLong(); - inputArguments = new String[in.readInt()]; - for (int i = 0; i < inputArguments.length; i++) { - inputArguments[i] = in.readString(); + private final long heapInit; + private final long heapMax; + private final long nonHeapInit; + private final long nonHeapMax; + private final long directMemoryMax; + + public Mem(long heapInit, long heapMax, long nonHeapInit, long nonHeapMax, long directMemoryMax) { + this.heapInit = heapInit; + this.heapMax = heapMax; + this.nonHeapInit = nonHeapInit; + this.nonHeapMax = nonHeapMax; + this.directMemoryMax = directMemoryMax; } - bootClassPath = in.readString(); - classPath = in.readString(); - systemProperties = new HashMap<>(); - int size = in.readInt(); - for (int i = 0; i < size; i++) { - systemProperties.put(in.readString(), in.readString()); + + public Mem(StreamInput in) throws IOException { + this.heapInit = in.readVLong(); + this.heapMax = in.readVLong(); + this.nonHeapInit = in.readVLong(); + this.nonHeapMax = in.readVLong(); + this.directMemoryMax = in.readVLong(); } - mem = new Mem(); - mem.readFrom(in); - gcCollectors = in.readStringArray(); - memoryPools = in.readStringArray(); - useCompressedOops = in.readString(); - } - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeLong(pid); - out.writeString(version); - out.writeString(vmName); - out.writeString(vmVersion); - out.writeString(vmVendor); - out.writeLong(startTime); - out.writeInt(inputArguments.length); - for (String inputArgument : inputArguments) { - out.writeString(inputArgument); - } - out.writeString(bootClassPath); - out.writeString(classPath); - out.writeInt(systemProperties.size()); - for (Map.Entry entry : systemProperties.entrySet()) { - out.writeString(entry.getKey()); - out.writeString(entry.getValue()); - } - mem.writeTo(out); - out.writeStringArray(gcCollectors); - out.writeStringArray(memoryPools); - out.writeString(useCompressedOops); - } - - public static class Mem implements Streamable { - - long heapInit = 0; - long heapMax = 0; - long nonHeapInit = 0; - long nonHeapMax = 0; - long directMemoryMax = 0; - - Mem() { + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(heapInit); + out.writeVLong(heapMax); + out.writeVLong(nonHeapInit); + out.writeVLong(nonHeapMax); + out.writeVLong(directMemoryMax); } public ByteSizeValue getHeapInit() { @@ -494,23 +528,5 @@ public class JvmInfo implements Streamable, ToXContent { public ByteSizeValue getDirectMemoryMax() { return new ByteSizeValue(directMemoryMax); } - - @Override - public void readFrom(StreamInput in) throws IOException { - heapInit = in.readVLong(); - heapMax = in.readVLong(); - nonHeapInit = in.readVLong(); - nonHeapMax = in.readVLong(); - directMemoryMax = in.readVLong(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVLong(heapInit); - out.writeVLong(heapMax); - out.writeVLong(nonHeapInit); - out.writeVLong(nonHeapMax); - out.writeVLong(directMemoryMax); - } } } diff --git a/core/src/main/java/org/elasticsearch/monitor/os/DummyOsInfo.java b/core/src/main/java/org/elasticsearch/monitor/os/DummyOsInfo.java index 599755e78a4..af6ea851803 100644 --- a/core/src/main/java/org/elasticsearch/monitor/os/DummyOsInfo.java +++ b/core/src/main/java/org/elasticsearch/monitor/os/DummyOsInfo.java @@ -21,13 +21,8 @@ package org.elasticsearch.monitor.os; public class DummyOsInfo extends 
OsInfo { - DummyOsInfo() { - refreshInterval = 0; - availableProcessors = 0; - allocatedProcessors = 0; - name = "dummy_name"; - arch = "dummy_arch"; - version = "dummy_version"; + private DummyOsInfo() { + super(0, 0, 0, "dummy_name", "dummy_arch", "dummy_version"); } public static final DummyOsInfo INSTANCE = new DummyOsInfo(); diff --git a/core/src/main/java/org/elasticsearch/monitor/os/OsInfo.java b/core/src/main/java/org/elasticsearch/monitor/os/OsInfo.java index f0520358524..7a0175c31d1 100644 --- a/core/src/main/java/org/elasticsearch/monitor/os/OsInfo.java +++ b/core/src/main/java/org/elasticsearch/monitor/os/OsInfo.java @@ -21,25 +21,47 @@ package org.elasticsearch.monitor.os; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; -public class OsInfo implements Streamable, ToXContent { +public class OsInfo implements Writeable, ToXContent { - long refreshInterval; + private final long refreshInterval; + private final int availableProcessors; + private final int allocatedProcessors; + private final String name; + private final String arch; + private final String version; - int availableProcessors; + public OsInfo(long refreshInterval, int availableProcessors, int allocatedProcessors, String name, String arch, String version) { + this.refreshInterval = refreshInterval; + this.availableProcessors = availableProcessors; + this.allocatedProcessors = allocatedProcessors; + this.name = name; + this.arch = arch; + this.version = version; + } - int allocatedProcessors; + public OsInfo(StreamInput in) throws IOException { + this.refreshInterval = in.readLong(); + this.availableProcessors = in.readInt(); + this.allocatedProcessors = in.readInt(); + this.name = in.readOptionalString(); + this.arch = in.readOptionalString(); + this.version = in.readOptionalString(); + } - String name = null; - String arch = null; - String version = null; - - OsInfo() { + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeLong(refreshInterval); + out.writeInt(availableProcessors); + out.writeInt(allocatedProcessors); + out.writeOptionalString(name); + out.writeOptionalString(arch); + out.writeOptionalString(version); } public long getRefreshInterval() { @@ -95,30 +117,4 @@ public class OsInfo implements Streamable, ToXContent { builder.endObject(); return builder; } - - public static OsInfo readOsInfo(StreamInput in) throws IOException { - OsInfo info = new OsInfo(); - info.readFrom(in); - return info; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - refreshInterval = in.readLong(); - availableProcessors = in.readInt(); - allocatedProcessors = in.readInt(); - name = in.readOptionalString(); - arch = in.readOptionalString(); - version = in.readOptionalString(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeLong(refreshInterval); - out.writeInt(availableProcessors); - out.writeInt(allocatedProcessors); - out.writeOptionalString(name); - out.writeOptionalString(arch); - out.writeOptionalString(version); - } } diff --git a/core/src/main/java/org/elasticsearch/monitor/os/OsProbe.java b/core/src/main/java/org/elasticsearch/monitor/os/OsProbe.java index 017f961a315..08abfc05f1d 100644 --- 
a/core/src/main/java/org/elasticsearch/monitor/os/OsProbe.java +++ b/core/src/main/java/org/elasticsearch/monitor/os/OsProbe.java @@ -163,33 +163,16 @@ public class OsProbe { private OsProbe() { } - public OsInfo osInfo() { - OsInfo info = new OsInfo(); - info.availableProcessors = Runtime.getRuntime().availableProcessors(); - info.name = Constants.OS_NAME; - info.arch = Constants.OS_ARCH; - info.version = Constants.OS_VERSION; - return info; + public OsInfo osInfo(long refreshInterval, int allocatedProcessors) { + return new OsInfo(refreshInterval, Runtime.getRuntime().availableProcessors(), + allocatedProcessors, Constants.OS_NAME, Constants.OS_ARCH, Constants.OS_VERSION); } public OsStats osStats() { - OsStats stats = new OsStats(); - stats.timestamp = System.currentTimeMillis(); - stats.cpu = new OsStats.Cpu(); - stats.cpu.percent = getSystemCpuPercent(); - stats.cpu.loadAverage = getSystemLoadAverage(); - - OsStats.Mem mem = new OsStats.Mem(); - mem.total = getTotalPhysicalMemorySize(); - mem.free = getFreePhysicalMemorySize(); - stats.mem = mem; - - OsStats.Swap swap = new OsStats.Swap(); - swap.total = getTotalSwapSpaceSize(); - swap.free = getFreeSwapSpaceSize(); - stats.swap = swap; - - return stats; + OsStats.Cpu cpu = new OsStats.Cpu(getSystemCpuPercent(), getSystemLoadAverage()); + OsStats.Mem mem = new OsStats.Mem(getTotalPhysicalMemorySize(), getFreePhysicalMemorySize()); + OsStats.Swap swap = new OsStats.Swap(getTotalSwapSpaceSize(), getFreeSwapSpaceSize()); + return new OsStats(System.currentTimeMillis(), cpu, mem, swap); } /** diff --git a/core/src/main/java/org/elasticsearch/monitor/os/OsService.java b/core/src/main/java/org/elasticsearch/monitor/os/OsService.java index 248b49f21cb..cb67eef852c 100644 --- a/core/src/main/java/org/elasticsearch/monitor/os/OsService.java +++ b/core/src/main/java/org/elasticsearch/monitor/os/OsService.java @@ -27,32 +27,22 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.SingleObjectCache; import org.elasticsearch.common.util.concurrent.EsExecutors; -/** - * - */ public class OsService extends AbstractComponent { private final OsProbe probe; private final OsInfo info; - - private SingleObjectCache osStatsCache; + private final SingleObjectCache osStatsCache; public static final Setting REFRESH_INTERVAL_SETTING = Setting.timeSetting("monitor.os.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), - Property.NodeScope); + Property.NodeScope); public OsService(Settings settings) { super(settings); this.probe = OsProbe.getInstance(); TimeValue refreshInterval = REFRESH_INTERVAL_SETTING.get(settings); - - this.info = probe.osInfo(); - this.info.refreshInterval = refreshInterval.millis(); - this.info.allocatedProcessors = EsExecutors.boundedNumberOfProcessors(settings); - - osStatsCache = new OsStatsCache(refreshInterval, probe.osStats()); + this.info = probe.osInfo(refreshInterval.millis(), EsExecutors.boundedNumberOfProcessors(settings)); + this.osStatsCache = new OsStatsCache(refreshInterval, probe.osStats()); logger.debug("using refresh_interval [{}]", refreshInterval); } diff --git a/core/src/main/java/org/elasticsearch/monitor/os/OsStats.java b/core/src/main/java/org/elasticsearch/monitor/os/OsStats.java index 51302d7ae6b..06d864f31a4 100644 --- a/core/src/main/java/org/elasticsearch/monitor/os/OsStats.java +++ b/core/src/main/java/org/elasticsearch/monitor/os/OsStats.java @@ -21,28 +21,42 @@ package org.elasticsearch.monitor.os; import
org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; import java.util.Arrays; +import java.util.Objects; -/** - * - */ -public class OsStats implements Streamable, ToXContent { +public class OsStats implements Writeable, ToXContent { - long timestamp; + private final long timestamp; + private final Cpu cpu; + private final Mem mem; + private final Swap swap; - Cpu cpu = null; + public OsStats(long timestamp, Cpu cpu, Mem mem, Swap swap) { + this.timestamp = timestamp; + this.cpu = Objects.requireNonNull(cpu, "cpu must not be null"); + this.mem = Objects.requireNonNull(mem, "mem must not be null"); + this.swap = Objects.requireNonNull(swap, "swap must not be null"); + } - Mem mem = null; + public OsStats(StreamInput in) throws IOException { + this.timestamp = in.readVLong(); + this.cpu = new Cpu(in); + this.mem = new Mem(in); + this.swap = new Swap(in); + } - Swap swap = null; - - OsStats() { + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(timestamp); + cpu.writeTo(out); + mem.writeTo(out); + swap.writeTo(out); } public long getTimestamp() { @@ -65,9 +79,9 @@ public class OsStats implements Streamable, ToXContent { static final String CPU = "cpu"; static final String PERCENT = "percent"; static final String LOAD_AVERAGE = "load_average"; - static final String LOAD_AVERAGE_1M = new String("1m"); - static final String LOAD_AVERAGE_5M = new String("5m"); - static final String LOAD_AVERAGE_15M = new String("15m"); + static final String LOAD_AVERAGE_1M = "1m"; + static final String LOAD_AVERAGE_5M = "5m"; + static final String LOAD_AVERAGE_15M = "15m"; static final String MEM = "mem"; static final String SWAP = "swap"; @@ -86,105 +100,29 @@ public class OsStats implements Streamable, ToXContent { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.OS); builder.field(Fields.TIMESTAMP, getTimestamp()); - if (cpu != null) { - builder.startObject(Fields.CPU); - builder.field(Fields.PERCENT, cpu.getPercent()); - if (cpu.getLoadAverage() != null && Arrays.stream(cpu.getLoadAverage()).anyMatch(load -> load != -1)) { - builder.startObject(Fields.LOAD_AVERAGE); - if (cpu.getLoadAverage()[0] != -1) { - builder.field(Fields.LOAD_AVERAGE_1M, cpu.getLoadAverage()[0]); - } - if (cpu.getLoadAverage()[1] != -1) { - builder.field(Fields.LOAD_AVERAGE_5M, cpu.getLoadAverage()[1]); - } - if (cpu.getLoadAverage()[2] != -1) { - builder.field(Fields.LOAD_AVERAGE_15M, cpu.getLoadAverage()[2]); - } - builder.endObject(); - } - builder.endObject(); - } - - if (mem != null) { - builder.startObject(Fields.MEM); - builder.byteSizeField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, mem.getTotal()); - builder.byteSizeField(Fields.FREE_IN_BYTES, Fields.FREE, mem.getFree()); - builder.byteSizeField(Fields.USED_IN_BYTES, Fields.USED, mem.getUsed()); - - builder.field(Fields.FREE_PERCENT, mem.getFreePercent()); - builder.field(Fields.USED_PERCENT, mem.getUsedPercent()); - - builder.endObject(); - } - - if (swap != null) { - builder.startObject(Fields.SWAP); - builder.byteSizeField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, swap.getTotal()); - builder.byteSizeField(Fields.FREE_IN_BYTES,
Fields.FREE, swap.getFree()); - builder.byteSizeField(Fields.USED_IN_BYTES, Fields.USED, swap.getUsed()); - builder.endObject(); - } - + cpu.toXContent(builder, params); + mem.toXContent(builder, params); + swap.toXContent(builder, params); builder.endObject(); return builder; } - public static OsStats readOsStats(StreamInput in) throws IOException { - OsStats stats = new OsStats(); - stats.readFrom(in); - return stats; - } + public static class Cpu implements Writeable, ToXContent { - @Override - public void readFrom(StreamInput in) throws IOException { - timestamp = in.readVLong(); - cpu = in.readOptionalStreamable(Cpu::new); - if (in.readBoolean()) { - mem = Mem.readMem(in); - } - if (in.readBoolean()) { - swap = Swap.readSwap(in); - } - } + private final short percent; + private final double[] loadAverage; - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVLong(timestamp); - out.writeOptionalStreamable(cpu); - if (mem == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - mem.writeTo(out); - } - if (swap == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - swap.writeTo(out); - } - } - - public static class Cpu implements Streamable { - - short percent = -1; - double[] loadAverage = null; - - Cpu() {} - - public static Cpu readCpu(StreamInput in) throws IOException { - Cpu cpu = new Cpu(); - cpu.readFrom(in); - return cpu; + public Cpu(short systemCpuPercent, double[] systemLoadAverage) { + this.percent = systemCpuPercent; + this.loadAverage = systemLoadAverage; } - @Override - public void readFrom(StreamInput in) throws IOException { - percent = in.readShort(); + public Cpu(StreamInput in) throws IOException { + this.percent = in.readShort(); if (in.readBoolean()) { - loadAverage = in.readDoubleArray(); + this.loadAverage = in.readDoubleArray(); } else { - loadAverage = null; + this.loadAverage = null; } } @@ -206,12 +144,49 @@ public class OsStats implements Streamable, ToXContent { public double[] getLoadAverage() { return loadAverage; } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(Fields.CPU); + builder.field(Fields.PERCENT, getPercent()); + if (getLoadAverage() != null && Arrays.stream(getLoadAverage()).anyMatch(load -> load != -1)) { + builder.startObject(Fields.LOAD_AVERAGE); + if (getLoadAverage()[0] != -1) { + builder.field(Fields.LOAD_AVERAGE_1M, getLoadAverage()[0]); + } + if (getLoadAverage()[1] != -1) { + builder.field(Fields.LOAD_AVERAGE_5M, getLoadAverage()[1]); + } + if (getLoadAverage()[2] != -1) { + builder.field(Fields.LOAD_AVERAGE_15M, getLoadAverage()[2]); + } + builder.endObject(); + } + builder.endObject(); + return builder; + } } - public static class Swap implements Streamable { + public static class Swap implements Writeable, ToXContent { - long total = -1; - long free = -1; + private final long total; + private final long free; + + public Swap(long total, long free) { + this.total = total; + this.free = free; + } + + public Swap(StreamInput in) throws IOException { + this.total = in.readLong(); + this.free = in.readLong(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeLong(total); + out.writeLong(free); + } public ByteSizeValue getFree() { return new ByteSizeValue(free); @@ -225,40 +200,30 @@ public class OsStats implements Streamable, ToXContent { return new ByteSizeValue(total); } - public static Swap readSwap(StreamInput in) throws IOException { - 
Swap swap = new Swap(); - swap.readFrom(in); - return swap; - } - @Override - public void readFrom(StreamInput in) throws IOException { - total = in.readLong(); - free = in.readLong(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeLong(total); - out.writeLong(free); + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(Fields.SWAP); + builder.byteSizeField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, getTotal()); + builder.byteSizeField(Fields.FREE_IN_BYTES, Fields.FREE, getFree()); + builder.byteSizeField(Fields.USED_IN_BYTES, Fields.USED, getUsed()); + builder.endObject(); + return builder; } } - public static class Mem implements Streamable { + public static class Mem implements Writeable, ToXContent { - long total = -1; - long free = -1; + private final long total; + private final long free; - public static Mem readMem(StreamInput in) throws IOException { - Mem mem = new Mem(); - mem.readFrom(in); - return mem; + public Mem(long total, long free) { + this.total = total; + this.free = free; } - @Override - public void readFrom(StreamInput in) throws IOException { - total = in.readLong(); - free = in.readLong(); + public Mem(StreamInput in) throws IOException { + this.total = in.readLong(); + this.free = in.readLong(); } @Override @@ -276,7 +241,7 @@ public class OsStats implements Streamable, ToXContent { } public short getUsedPercent() { - return calculatePercentage(getUsed().bytes(), getTotal().bytes()); + return calculatePercentage(getUsed().bytes(), total); } public ByteSizeValue getFree() { @@ -284,11 +249,23 @@ public class OsStats implements Streamable, ToXContent { } public short getFreePercent() { - return calculatePercentage(getFree().bytes(), getTotal().bytes()); + return calculatePercentage(free, total); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(Fields.MEM); + builder.byteSizeField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, getTotal()); + builder.byteSizeField(Fields.FREE_IN_BYTES, Fields.FREE, getFree()); + builder.byteSizeField(Fields.USED_IN_BYTES, Fields.USED, getUsed()); + builder.field(Fields.FREE_PERCENT, getFreePercent()); + builder.field(Fields.USED_PERCENT, getUsedPercent()); + builder.endObject(); + return builder; } } - private static short calculatePercentage(long used, long max) { + public static short calculatePercentage(long used, long max) { return max <= 0 ? 
0 : (short) (Math.round((100d * used) / max)); } } diff --git a/core/src/main/java/org/elasticsearch/monitor/process/ProcessInfo.java b/core/src/main/java/org/elasticsearch/monitor/process/ProcessInfo.java index cf9c9e63b87..a0e3e7a70f2 100644 --- a/core/src/main/java/org/elasticsearch/monitor/process/ProcessInfo.java +++ b/core/src/main/java/org/elasticsearch/monitor/process/ProcessInfo.java @@ -21,26 +21,35 @@ package org.elasticsearch.monitor.process; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; -public class ProcessInfo implements Streamable, ToXContent { +public class ProcessInfo implements Writeable, ToXContent { - long refreshInterval; + private final long refreshInterval; + private final long id; + private final boolean mlockall; - private long id; - - private boolean mlockall; - - ProcessInfo() { - } - - public ProcessInfo(long id, boolean mlockall) { + public ProcessInfo(long id, boolean mlockall, long refreshInterval) { this.id = id; this.mlockall = mlockall; + this.refreshInterval = refreshInterval; + } + + public ProcessInfo(StreamInput in) throws IOException { + refreshInterval = in.readLong(); + id = in.readLong(); + mlockall = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeLong(refreshInterval); + out.writeLong(id); + out.writeBoolean(mlockall); } public long refreshInterval() { @@ -79,24 +88,4 @@ public class ProcessInfo implements Streamable, ToXContent { builder.endObject(); return builder; } - - public static ProcessInfo readProcessInfo(StreamInput in) throws IOException { - ProcessInfo info = new ProcessInfo(); - info.readFrom(in); - return info; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - refreshInterval = in.readLong(); - id = in.readLong(); - mlockall = in.readBoolean(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeLong(refreshInterval); - out.writeLong(id); - out.writeBoolean(mlockall); - } } diff --git a/core/src/main/java/org/elasticsearch/monitor/process/ProcessProbe.java b/core/src/main/java/org/elasticsearch/monitor/process/ProcessProbe.java index b19b54a9478..e4307f724c5 100644 --- a/core/src/main/java/org/elasticsearch/monitor/process/ProcessProbe.java +++ b/core/src/main/java/org/elasticsearch/monitor/process/ProcessProbe.java @@ -126,8 +126,8 @@ public class ProcessProbe { return -1; } - public ProcessInfo processInfo() { - return new ProcessInfo(jvmInfo().pid(), BootstrapInfo.isMemoryLocked()); + public ProcessInfo processInfo(long refreshInterval) { + return new ProcessInfo(jvmInfo().pid(), BootstrapInfo.isMemoryLocked(), refreshInterval); } public ProcessStats processStats() { diff --git a/core/src/main/java/org/elasticsearch/monitor/process/ProcessService.java b/core/src/main/java/org/elasticsearch/monitor/process/ProcessService.java index 38593534480..99593003b34 100644 --- a/core/src/main/java/org/elasticsearch/monitor/process/ProcessService.java +++ b/core/src/main/java/org/elasticsearch/monitor/process/ProcessService.java @@ -42,11 +42,9 @@ public final class ProcessService extends AbstractComponent { public ProcessService(Settings settings) { super(settings); this.probe = ProcessProbe.getInstance(); 
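
The Streamable-to-Writeable migrations in this patch (OsStats.Cpu/Mem/Swap above, ProcessInfo and PluginInfo here) all share one shape: the no-arg constructor plus a mutable readFrom(StreamInput) is replaced by a constructor that reads from the stream, so every field can become final. A minimal sketch of that pattern, using plain java.io streams in place of Elasticsearch's StreamInput/StreamOutput (the SwapLike class and its fields are illustrative, not part of the patch):

    import java.io.*;

    // Illustrative analogue of the Writeable pattern: deserialization happens in a
    // constructor instead of a mutable readFrom(), so the fields can be final.
    final class SwapLike {
        private final long total;
        private final long free;

        SwapLike(long total, long free) {
            this.total = total;
            this.free = free;
        }

        SwapLike(DataInput in) throws IOException { // analogue of Swap(StreamInput in)
            this.total = in.readLong();
            this.free = in.readLong();
        }

        void writeTo(DataOutput out) throws IOException { // analogue of Writeable.writeTo
            out.writeLong(total);
            out.writeLong(free);
        }

        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            new SwapLike(1024L, 512L).writeTo(new DataOutputStream(bytes));
            SwapLike copy = new SwapLike(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
            System.out.println(copy.total + "/" + copy.free); // 1024/512
        }
    }
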
- final TimeValue refreshInterval = REFRESH_INTERVAL_SETTING.get(settings); processStatsCache = new ProcessStatsCache(refreshInterval, probe.processStats()); - this.info = probe.processInfo(); - this.info.refreshInterval = refreshInterval.millis(); + this.info = probe.processInfo(refreshInterval.millis()); logger.debug("using refresh_interval [{}]", refreshInterval); } diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index e5c61947ae7..402064e88d2 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ b/core/src/main/java/org/elasticsearch/node/Node.java @@ -19,6 +19,10 @@ package org.elasticsearch.node; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.LoggerContext; +import org.apache.logging.log4j.core.config.Configurator; import org.apache.lucene.util.Constants; import org.apache.lucene.util.IOUtils; import org.elasticsearch.Build; @@ -51,7 +55,6 @@ import org.elasticsearch.common.inject.ModulesBuilder; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkModule; @@ -119,6 +122,7 @@ import org.elasticsearch.transport.TransportService; import org.elasticsearch.tribe.TribeService; import org.elasticsearch.watcher.ResourceWatcherService; +import javax.management.MBeanServerPermission; import java.io.BufferedWriter; import java.io.Closeable; import java.io.IOException; @@ -129,11 +133,13 @@ import java.nio.charset.Charset; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardCopyOption; +import java.security.AccessControlException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -216,7 +222,7 @@ public class Node implements Closeable { boolean success = false; { // use temp logger just to say we are starting. we can't use it later on because the node name might not be set - ESLogger logger = Loggers.getLogger(Node.class, NODE_NAME_SETTING.get(environment.settings())); + Logger logger = Loggers.getLogger(Node.class, NODE_NAME_SETTING.get(environment.settings())); logger.info("initializing ..."); } @@ -236,7 +242,7 @@ public class Node implements Closeable { final boolean hadPredefinedNodeName = NODE_NAME_SETTING.exists(tmpSettings); tmpSettings = addNodeNameIfNeeded(tmpSettings, nodeEnvironment.nodeId()); - ESLogger logger = Loggers.getLogger(Node.class, tmpSettings); + Logger logger = Loggers.getLogger(Node.class, tmpSettings); if (hadPredefinedNodeName == false) { logger.info("node name [{}] derived from node ID; set [{}] to override", NODE_NAME_SETTING.get(tmpSettings), NODE_NAME_SETTING.getKey()); @@ -454,7 +460,7 @@ public class Node implements Closeable { return this; } - ESLogger logger = Loggers.getLogger(Node.class, NODE_NAME_SETTING.get(settings)); + Logger logger = Loggers.getLogger(Node.class, NODE_NAME_SETTING.get(settings)); logger.info("starting ..."); // hack around dependency injection problem (for now...) 
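
The recurring ESLogger-to-Logger substitutions in Node.java are purely mechanical: the field type becomes the Log4j 2 Logger interface, and Loggers.getLogger now returns it. Outside Elasticsearch the equivalent lookup goes through LogManager; a hedged sketch (class name hypothetical):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;

    // Hypothetical class showing the target API: the field is the Log4j 2 Logger
    // interface rather than the removed ESLogger wrapper.
    class NodeLifecycleSketch {
        private static final Logger logger = LogManager.getLogger(NodeLifecycleSketch.class);

        void start() { logger.info("starting ..."); }
        void stop()  { logger.info("stopping ..."); }
    }
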
injector.getInstance(Discovery.class).setAllocationService(injector.getInstance(AllocationService.class)); @@ -569,7 +575,7 @@ public class Node implements Closeable { if (!lifecycle.moveToStopped()) { return this; } - ESLogger logger = Loggers.getLogger(Node.class, NODE_NAME_SETTING.get(settings)); + Logger logger = Loggers.getLogger(Node.class, NODE_NAME_SETTING.get(settings)); logger.info("stopping ..."); injector.getInstance(TribeService.class).stop(); @@ -600,6 +606,24 @@ public class Node implements Closeable { injector.getInstance(IndicesService.class).stop(); logger.info("stopped"); + final String log4jShutdownEnabled = System.getProperty("es.log4j.shutdownEnabled", "true"); + final boolean shutdownEnabled; + switch (log4jShutdownEnabled) { + case "true": + shutdownEnabled = true; + break; + case "false": + shutdownEnabled = false; + break; + default: + throw new IllegalArgumentException( + "invalid value for [es.log4j.shutdownEnabled], was [" + log4jShutdownEnabled + "] but must be [true] or [false]"); + } + if (shutdownEnabled) { + LoggerContext context = (LoggerContext) LogManager.getContext(false); + Configurator.shutdown(context); + } + return this; } @@ -615,7 +639,7 @@ public class Node implements Closeable { return; } - ESLogger logger = Loggers.getLogger(Node.class, NODE_NAME_SETTING.get(settings)); + Logger logger = Loggers.getLogger(Node.class, NODE_NAME_SETTING.get(settings)); logger.info("closing ..."); List toClose = new ArrayList<>(); StopWatch stopWatch = new StopWatch("node_close"); diff --git a/core/src/main/java/org/elasticsearch/plugins/DummyPluginInfo.java b/core/src/main/java/org/elasticsearch/plugins/DummyPluginInfo.java index 5dab19581a3..90b1d32f4ae 100644 --- a/core/src/main/java/org/elasticsearch/plugins/DummyPluginInfo.java +++ b/core/src/main/java/org/elasticsearch/plugins/DummyPluginInfo.java @@ -24,5 +24,6 @@ public class DummyPluginInfo extends PluginInfo { super(name, description, version, classname); } - public static final DummyPluginInfo INSTANCE = new DummyPluginInfo("dummy_plugin_name", "dummy plugin description", "dummy_plugin_version", "DummyPluginName"); + public static final DummyPluginInfo INSTANCE = new DummyPluginInfo( + "dummy_plugin_name", "dummy plugin description", "dummy_plugin_version", "DummyPluginName"); } diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginCli.java b/core/src/main/java/org/elasticsearch/plugins/PluginCli.java index 3ce60882cce..ba401404f2e 100644 --- a/core/src/main/java/org/elasticsearch/plugins/PluginCli.java +++ b/core/src/main/java/org/elasticsearch/plugins/PluginCli.java @@ -31,7 +31,7 @@ import org.elasticsearch.node.internal.InternalSettingsPreparer; */ public class PluginCli extends MultiCommand { - public PluginCli() { + private PluginCli() { super("A tool for managing installed elasticsearch plugins"); subcommands.put("list", new ListPluginsCommand()); subcommands.put("install", new InstallPluginCommand()); @@ -39,7 +39,7 @@ public class PluginCli extends MultiCommand { } public static void main(String[] args) throws Exception { - // initialize default for es.logger.level because we will not read the logging.yml + // initialize default for es.logger.level because we will not read the log4j2.properties String loggerLevel = System.getProperty("es.logger.level", "INFO"); String pathHome = System.getProperty("es.path.home"); // Set the appender for all potential log files to terminal so that other components that use the logger print out the @@ -50,11 +50,9 @@ public class PluginCli extends 
MultiCommand { // Therefore we print to Terminal. Environment loggingEnvironment = InternalSettingsPreparer.prepareEnvironment(Settings.builder() .put("path.home", pathHome) - .put("appender.terminal.type", "terminal") - .put("rootLogger", "${logger.level}, terminal") .put("logger.level", loggerLevel) .build(), Terminal.DEFAULT); - LogConfigurator.configure(loggingEnvironment.settings(), false); + LogConfigurator.configure(loggingEnvironment, false); exit(new PluginCli().main(args, Terminal.DEFAULT)); } diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginInfo.java b/core/src/main/java/org/elasticsearch/plugins/PluginInfo.java index 500861d8999..3e241eadd37 100644 --- a/core/src/main/java/org/elasticsearch/plugins/PluginInfo.java +++ b/core/src/main/java/org/elasticsearch/plugins/PluginInfo.java @@ -22,7 +22,7 @@ import org.elasticsearch.Version; import org.elasticsearch.bootstrap.JarHell; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -32,7 +32,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.Properties; -public class PluginInfo implements Streamable, ToXContent { +public class PluginInfo implements Writeable, ToXContent { public static final String ES_PLUGIN_PROPERTIES = "plugin-descriptor.properties"; public static final String ES_PLUGIN_POLICY = "plugin-security.policy"; @@ -45,13 +45,10 @@ public class PluginInfo implements Streamable, ToXContent { static final String CLASSNAME = "classname"; } - private String name; - private String description; - private String version; - private String classname; - - public PluginInfo() { - } + private final String name; + private final String description; + private final String version; + private final String classname; /** * Information about plugins @@ -60,13 +57,28 @@ public class PluginInfo implements Streamable, ToXContent { * @param description Its description * @param version Version number */ - PluginInfo(String name, String description, String version, String classname) { + public PluginInfo(String name, String description, String version, String classname) { this.name = name; this.description = description; this.version = version; this.classname = classname; } + public PluginInfo(StreamInput in) throws IOException { + this.name = in.readString(); + this.description = in.readString(); + this.version = in.readString(); + this.classname = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(name); + out.writeString(description); + out.writeString(version); + out.writeString(classname); + } + /** reads (and validates) plugin metadata descriptor file */ public static PluginInfo readFromProperties(Path dir) throws IOException { Path descriptor = dir.resolve(ES_PLUGIN_PROPERTIES); @@ -138,28 +150,6 @@ public class PluginInfo implements Streamable, ToXContent { return version; } - public static PluginInfo readFromStream(StreamInput in) throws IOException { - PluginInfo info = new PluginInfo(); - info.readFrom(in); - return info; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - this.name = in.readString(); - this.description = in.readString(); - this.version = in.readString(); - this.classname = in.readString(); - } - - @Override - 
public void writeTo(StreamOutput out) throws IOException { - out.writeString(name); - out.writeString(description); - out.writeString(version); - out.writeString(classname); - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginsService.java b/core/src/main/java/org/elasticsearch/plugins/PluginsService.java index ccbde1310d5..01704e9ed86 100644 --- a/core/src/main/java/org/elasticsearch/plugins/PluginsService.java +++ b/core/src/main/java/org/elasticsearch/plugins/PluginsService.java @@ -19,6 +19,31 @@ package org.elasticsearch.plugins; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; +import org.apache.lucene.analysis.util.CharFilterFactory; +import org.apache.lucene.analysis.util.TokenFilterFactory; +import org.apache.lucene.analysis.util.TokenizerFactory; +import org.apache.lucene.codecs.Codec; +import org.apache.lucene.codecs.DocValuesFormat; +import org.apache.lucene.codecs.PostingsFormat; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.admin.cluster.node.info.PluginsAndModules; +import org.elasticsearch.bootstrap.JarHell; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.component.LifecycleComponent; +import org.elasticsearch.common.inject.Module; +import org.elasticsearch.common.io.FileSystemUtils; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexModule; +import org.elasticsearch.threadpool.ExecutorBuilder; + import java.io.IOException; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; @@ -39,29 +64,6 @@ import java.util.Set; import java.util.function.Function; import java.util.stream.Collectors; -import org.apache.lucene.analysis.util.CharFilterFactory; -import org.apache.lucene.analysis.util.TokenFilterFactory; -import org.apache.lucene.analysis.util.TokenizerFactory; -import org.apache.lucene.codecs.Codec; -import org.apache.lucene.codecs.DocValuesFormat; -import org.apache.lucene.codecs.PostingsFormat; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.admin.cluster.node.info.PluginsAndModules; -import org.elasticsearch.bootstrap.JarHell; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.component.LifecycleComponent; -import org.elasticsearch.common.inject.Module; -import org.elasticsearch.common.io.FileSystemUtils; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Property; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.IndexModule; -import org.elasticsearch.threadpool.ExecutorBuilder; - import static org.elasticsearch.common.io.FileSystemUtils.isAccessibleDirectory; /** @@ -106,10 +108,9 @@ public class PluginsService extends AbstractComponent { */ public PluginsService(Settings 
settings, Path modulesDirectory, Path pluginsDirectory, Collection<Class<? extends Plugin>> classpathPlugins) {
         super(settings);
-        info = new PluginsAndModules();
         List<Tuple<PluginInfo, Plugin>> pluginsLoaded = new ArrayList<>();
-
+        List<PluginInfo> pluginsList = new ArrayList<>();
         // first we load plugins that are on the classpath. this is for tests and transport clients
         for (Class<? extends Plugin> pluginClass : classpathPlugins) {
             Plugin plugin = loadPlugin(pluginClass, settings);
@@ -118,9 +119,10 @@ public class PluginsService extends AbstractComponent {
                 logger.trace("plugin loaded from classpath [{}]", pluginInfo);
             }
             pluginsLoaded.add(new Tuple<>(pluginInfo, plugin));
-            info.addPlugin(pluginInfo);
+            pluginsList.add(pluginInfo);
         }
+        List<PluginInfo> modulesList = new ArrayList<>();
         // load modules
         if (modulesDirectory != null) {
             try {
@@ -128,7 +130,7 @@ public class PluginsService extends AbstractComponent {
                 List<Tuple<PluginInfo, Plugin>> loaded = loadBundles(bundles);
                 pluginsLoaded.addAll(loaded);
                 for (Tuple<PluginInfo, Plugin> module : loaded) {
-                    info.addModule(module.v1());
+                    modulesList.add(module.v1());
                 }
             } catch (IOException ex) {
                 throw new IllegalStateException("Unable to initialize modules", ex);
@@ -142,18 +144,19 @@ public class PluginsService extends AbstractComponent {
                 List<Tuple<PluginInfo, Plugin>> loaded = loadBundles(bundles);
                 pluginsLoaded.addAll(loaded);
                 for (Tuple<PluginInfo, Plugin> plugin : loaded) {
-                    info.addPlugin(plugin.v1());
+                    pluginsList.add(plugin.v1());
                 }
             } catch (IOException ex) {
                 throw new IllegalStateException("Unable to initialize plugins", ex);
             }
         }
-        plugins = Collections.unmodifiableList(pluginsLoaded);
+        this.info = new PluginsAndModules(pluginsList, modulesList);
+        this.plugins = Collections.unmodifiableList(pluginsLoaded);
         // We need to build a List of plugins for checking mandatory plugins
         Set<String> pluginsNames = new HashSet<>();
-        for (Tuple<PluginInfo, Plugin> tuple : plugins) {
+        for (Tuple<PluginInfo, Plugin> tuple : this.plugins) {
             pluginsNames.add(tuple.v1().getName());
         }
@@ -177,7 +180,7 @@ public class PluginsService extends AbstractComponent {
         logPluginInfo(info.getPluginInfos(), "plugin", logger);
         Map<Plugin, List<OnModuleReference>> onModuleReferences = new HashMap<>();
-        for (Tuple<PluginInfo, Plugin> pluginEntry : plugins) {
+        for (Tuple<PluginInfo, Plugin> pluginEntry : this.plugins) {
             Plugin plugin = pluginEntry.v2();
             List<OnModuleReference> list = new ArrayList<>();
             for (Method method : plugin.getClass().getMethods()) {
@@ -211,7 +214,7 @@ public class PluginsService extends AbstractComponent {
         this.onModuleReferences = Collections.unmodifiableMap(onModuleReferences);
     }
-    private static void logPluginInfo(final List<PluginInfo> pluginInfos, final String type, final ESLogger logger) {
+    private static void logPluginInfo(final List<PluginInfo> pluginInfos, final String type, final Logger logger) {
         assert pluginInfos != null;
         if (pluginInfos.isEmpty()) {
             logger.info("no " + type + "s loaded");
@@ -245,7 +248,7 @@ public class PluginsService extends AbstractComponent {
                 logger.warn("plugin {}, failed to invoke custom onModule method", e, plugin.v1().getName());
                 throw new ElasticsearchException("failed to invoke onModule", e);
             } catch (Exception e) {
-                logger.warn("plugin {}, failed to invoke custom onModule method", e, plugin.v1().getName());
+                logger.warn((Supplier<?>) () -> new ParameterizedMessage("plugin {}, failed to invoke custom onModule method", plugin.v1().getName()), e);
                 throw e;
             }
         }
@@ -346,7 +349,7 @@ public class PluginsService extends AbstractComponent {
     }
     static List<Bundle> getPluginBundles(Path pluginsDirectory) throws IOException {
-        ESLogger logger = Loggers.getLogger(PluginsService.class);
+        Logger logger = Loggers.getLogger(PluginsService.class);
         // TODO: remove this leniency, but tests bogusly rely on it
         if (!isAccessibleDirectory(pluginsDirectory, logger))
{ diff --git a/core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java b/core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java index 0b4f8d281d0..54cd34d6742 100644 --- a/core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java +++ b/core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java @@ -43,7 +43,7 @@ import static org.elasticsearch.cli.Terminal.Verbosity.VERBOSE; /** * A command for the plugin cli to remove a plugin from elasticsearch. */ -class RemovePluginCommand extends SettingCommand { +final class RemovePluginCommand extends SettingCommand { private final OptionSpec arguments; @@ -64,14 +64,16 @@ class RemovePluginCommand extends SettingCommand { terminal.println("-> Removing " + Strings.coalesceToEmpty(pluginName) + "..."); - Path pluginDir = env.pluginsFile().resolve(pluginName); + final Path pluginDir = env.pluginsFile().resolve(pluginName); if (Files.exists(pluginDir) == false) { - throw new UserException(ExitCodes.USAGE, "plugin " + pluginName + " not found; run 'elasticsearch-plugin list' to get list of installed plugins"); + throw new UserException( + ExitCodes.USAGE, + "plugin " + pluginName + " not found; run 'elasticsearch-plugin list' to get list of installed plugins"); } - List pluginPaths = new ArrayList<>(); + final List pluginPaths = new ArrayList<>(); - Path pluginBinDir = env.binFile().resolve(pluginName); + final Path pluginBinDir = env.binFile().resolve(pluginName); if (Files.exists(pluginBinDir)) { if (Files.isDirectory(pluginBinDir) == false) { throw new UserException(ExitCodes.IO_ERROR, "Bin dir for " + pluginName + " is not a directory"); @@ -81,10 +83,19 @@ class RemovePluginCommand extends SettingCommand { } terminal.println(VERBOSE, "Removing: " + pluginDir); - Path tmpPluginDir = env.pluginsFile().resolve(".removing-" + pluginName); + final Path tmpPluginDir = env.pluginsFile().resolve(".removing-" + pluginName); Files.move(pluginDir, tmpPluginDir, StandardCopyOption.ATOMIC_MOVE); pluginPaths.add(tmpPluginDir); IOUtils.rm(pluginPaths.toArray(new Path[pluginPaths.size()])); + + // we preserve the config files in case the user is upgrading the plugin, but we print + // a message so the user knows in case they want to remove manually + final Path pluginConfigDir = env.configFile().resolve(pluginName); + if (Files.exists(pluginConfigDir)) { + terminal.println( + "-> Preserving plugin config files [" + pluginConfigDir + "] in case of upgrade, delete manually if not needed"); + } } + } diff --git a/core/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/core/src/main/java/org/elasticsearch/repositories/RepositoriesService.java index 076853fd75d..e5951d48a00 100644 --- a/core/src/main/java/org/elasticsearch/repositories/RepositoriesService.java +++ b/core/src/main/java/org/elasticsearch/repositories/RepositoriesService.java @@ -19,6 +19,8 @@ package org.elasticsearch.repositories; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -140,7 +142,7 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta @Override public void onFailure(String source, Exception e) { - logger.warn("failed to create repository [{}]", e, request.name); + logger.warn((Supplier) () -> new ParameterizedMessage("failed to create repository [{}]", request.name), 
e); super.onFailure(source, e); } @@ -214,7 +216,7 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta try { repository.endVerification(verificationToken); } catch (Exception e) { - logger.warn("[{}] failed to finish repository verification", e, repositoryName); + logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to finish repository verification", repositoryName), e); listener.onFailure(e); return; } @@ -231,7 +233,7 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta repository.endVerification(verificationToken); } catch (Exception inner) { inner.addSuppressed(e); - logger.warn("[{}] failed to finish repository verification", inner, repositoryName); + logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to finish repository verification", repositoryName), inner); } listener.onFailure(e); } @@ -293,14 +295,14 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta } catch (RepositoryException ex) { // TODO: this catch is bogus, it means the old repo is already closed, // but we have nothing to replace it - logger.warn("failed to change repository [{}]", ex, repositoryMetaData.name()); + logger.warn((Supplier) () -> new ParameterizedMessage("failed to change repository [{}]", repositoryMetaData.name()), ex); } } } else { try { repository = createRepository(repositoryMetaData); } catch (RepositoryException ex) { - logger.warn("failed to create repository [{}]", ex, repositoryMetaData.name()); + logger.warn((Supplier) () -> new ParameterizedMessage("failed to create repository [{}]", repositoryMetaData.name()), ex); } } if (repository != null) { @@ -382,7 +384,7 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta repository.start(); return repository; } catch (Exception e) { - logger.warn("failed to create repository [{}][{}]", e, repositoryMetaData.type(), repositoryMetaData.name()); + logger.warn((Supplier) () -> new ParameterizedMessage("failed to create repository [{}][{}]", repositoryMetaData.type(), repositoryMetaData.name()), e); throw new RepositoryException(repositoryMetaData.name(), "failed to create repository", e); } } diff --git a/core/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java b/core/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java index 65544421c8c..49edae3ce22 100644 --- a/core/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java +++ b/core/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java @@ -19,14 +19,10 @@ package org.elasticsearch.repositories; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.atomic.AtomicInteger; - import com.carrotsearch.hppc.ObjectContainer; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -45,6 +41,12 @@ import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; +import 
java.util.concurrent.atomic.AtomicInteger; + public class VerifyNodeRepositoryAction extends AbstractComponent { public static final String ACTION_NAME = "internal:admin/repository/verify"; @@ -83,7 +85,7 @@ public class VerifyNodeRepositoryAction extends AbstractComponent { try { doVerify(repository, verificationToken, localNode); } catch (Exception e) { - logger.warn("[{}] failed to verify repository", e, repository); + logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to verify repository", repository), e); errors.add(new VerificationFailure(node.getId(), e)); } if (counter.decrementAndGet() == 0) { @@ -154,7 +156,7 @@ public class VerifyNodeRepositoryAction extends AbstractComponent { try { doVerify(request.repository, request.verificationToken, localNode); } catch (Exception ex) { - logger.warn("[{}] failed to verify repository", ex, request.repository); + logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to verify repository", request.repository), ex); throw ex; } channel.sendResponse(TransportResponse.Empty.INSTANCE); diff --git a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index c6b4634e1cf..2969a6ee6e3 100644 --- a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -19,6 +19,8 @@ package org.elasticsearch.repositories.blobstore; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexFormatTooNewException; @@ -353,10 +355,9 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp try { snapshotInfo = getSnapshotInfo(snapshotId); } catch (SnapshotException e) { - logger.warn("[{}] repository is on a pre-5.0 format with an index file that contains snapshot [{}] but " + - "the corresponding snap-{}.dat file cannot be read. The snapshot will no longer be included in " + - "the repository but its data directories will remain.", e, getMetadata().name(), - snapshotId, snapshotId.getUUID()); + logger.warn((Supplier) () -> new ParameterizedMessage("[{}] repository is on a pre-5.0 format with an index file that contains snapshot [{}] but " + + "the corresponding snap-{}.dat file cannot be read. 
The snapshot will no longer be included in " + + "the repository but its data directories will remain.", getMetadata().name(), snapshotId, snapshotId.getUUID()), e); continue; } for (final String indexName : snapshotInfo.indices()) { @@ -424,7 +425,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp try { indexMetaDataFormat(snapshot.version()).delete(indexMetaDataBlobContainer, snapshotId.getUUID()); } catch (IOException ex) { - logger.warn("[{}] failed to delete metadata for index [{}]", ex, snapshotId, index); + logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to delete metadata for index [{}]", snapshotId, index), ex); } if (metaData != null) { IndexMetaData indexMetaData = metaData.index(index); @@ -433,7 +434,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp try { delete(snapshotId, snapshot.version(), indexId, new ShardId(indexMetaData.getIndex(), shardId)); } catch (SnapshotException ex) { - logger.warn("[{}] failed to delete shard data for shard [{}][{}]", ex, snapshotId, index, shardId); + final int finalShardId = shardId; + logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to delete shard data for shard [{}][{}]", snapshotId, index, finalShardId), ex); } } } @@ -452,12 +454,12 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp // we'll ignore that and accept that cleanup didn't fully succeed. // since we are using UUIDs for path names, this won't be an issue for // snapshotting indices of the same name - logger.debug("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " + - "its index folder due to the directory not being empty.", dnee, metadata.name(), indexId); + logger.debug((Supplier) () -> new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " + + "its index folder due to the directory not being empty.", metadata.name(), indexId), dnee); } catch (IOException ioe) { // a different IOException occurred while trying to delete - will just log the issue for now - logger.debug("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " + - "its index folder.", ioe, metadata.name(), indexId); + logger.debug((Supplier) () -> new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " + + "its index folder.", metadata.name(), indexId), ioe); } } } catch (IOException ex) { @@ -471,7 +473,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp try { snapshotFormat(snapshotInfo.version()).delete(snapshotsBlobContainer, blobId); } catch (IOException e) { - logger.warn("[{}] Unable to delete snapshot file [{}]", e, snapshotInfo.snapshotId(), blobId); + logger.warn((Supplier) () -> new ParameterizedMessage("[{}] Unable to delete snapshot file [{}]", snapshotInfo.snapshotId(), blobId), e); } } else { // we don't know the version, first try the current format, then the legacy format @@ -483,7 +485,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp snapshotLegacyFormat.delete(snapshotsBlobContainer, blobId); } catch (IOException e2) { // neither snapshot file could be deleted, log the error - logger.warn("Unable to delete snapshot file [{}]", e, blobId); + logger.warn((Supplier) () -> new ParameterizedMessage("Unable to delete snapshot file [{}]", blobId), e); } } } @@ -495,7 +497,7 @@ public abstract 
class BlobStoreRepository extends AbstractLifecycleComponent imp try { globalMetaDataFormat(snapshotInfo.version()).delete(snapshotsBlobContainer, blobId); } catch (IOException e) { - logger.warn("[{}] Unable to delete global metadata file [{}]", e, snapshotInfo.snapshotId(), blobId); + logger.warn((Supplier) () -> new ParameterizedMessage("[{}] Unable to delete global metadata file [{}]", snapshotInfo.snapshotId(), blobId), e); } } else { // we don't know the version, first try the current format, then the legacy format @@ -507,7 +509,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp globalMetaDataLegacyFormat.delete(snapshotsBlobContainer, blobId); } catch (IOException e2) { // neither global metadata file could be deleted, log the error - logger.warn("Unable to delete global metadata file [{}]", e, blobId); + logger.warn((Supplier) () -> new ParameterizedMessage("Unable to delete global metadata file [{}]", blobId), e); } } } @@ -1074,7 +1076,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp blobContainer.deleteBlob(blobName); } catch (IOException e) { // TODO: don't catch and let the user handle it? - logger.debug("[{}] [{}] error deleting blob [{}] during cleanup", e, snapshotId, shardId, blobName); + logger.debug((Supplier) () -> new ParameterizedMessage("[{}] [{}] error deleting blob [{}] during cleanup", snapshotId, shardId, blobName), e); } } } @@ -1151,7 +1153,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp indexShardSnapshotsFormat.read(blobContainer, Integer.toString(latest)); return new Tuple<>(shardSnapshots, latest); } catch (IOException e) { - logger.warn("failed to read index file [{}]", e, SNAPSHOT_INDEX_PREFIX + latest); + final String file = SNAPSHOT_INDEX_PREFIX + latest; + logger.warn((Supplier) () -> new ParameterizedMessage("failed to read index file [{}]", file), e); } } @@ -1169,7 +1172,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp snapshots.add(new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles())); } } catch (IOException e) { - logger.warn("failed to read commit point [{}]", e, name); + logger.warn((Supplier) () -> new ParameterizedMessage("failed to read commit point [{}]", name), e); } } return new Tuple<>(new BlobStoreIndexShardSnapshots(snapshots), -1); @@ -1252,7 +1255,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp // in a bwc compatible way. 
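
All of the logging rewrites in RepositoriesService, VerifyNodeRepositoryAction, and BlobStoreRepository trade the old ESLogger overload, which took the Throwable in the middle of the argument list, for Log4j 2's Supplier overload: the Throwable moves to the last position and the ParameterizedMessage is only built if the level is enabled. A self-contained sketch of the call shape (logger and repository names illustrative):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.message.ParameterizedMessage;
    import org.apache.logging.log4j.util.Supplier;

    public class LazyWarnSketch {
        private static final Logger logger = LogManager.getLogger(LazyWarnSketch.class);

        public static void main(String[] args) {
            Exception e = new IllegalStateException("boom");
            String repositoryName = "my-repo"; // illustrative value
            // Old ESLogger style: logger.warn("[{}] failed ...", e, repositoryName);
            // Log4j 2 style: Throwable last, message built lazily via the Supplier.
            logger.warn((Supplier<?>) () -> new ParameterizedMessage(
                    "[{}] failed to finish repository verification", repositoryName), e);
        }
    }
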
                        maybeRecalculateMetadataHash(blobContainer, fileInfo, metadata);
                    } catch (Exception e) {
-                        logger.warn("{} Can't calculate hash from blob for file [{}] [{}]", e, shardId, fileInfo.physicalName(), fileInfo.metadata());
+                        logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} Can't calculate hash from blob for file [{}] [{}]", shardId, fileInfo.physicalName(), fileInfo.metadata()), e);
                    }
                    if (fileInfo.isSame(md) && snapshotFileExistsInBlobs(fileInfo, blobs)) {
                        // a commit point file with the same name, size and checksum was already copied to repository
@@ -1525,7 +1528,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
                logger.trace("[{}] [{}] restoring from to an empty shard", shardId, snapshotId);
                recoveryTargetMetadata = Store.MetadataSnapshot.EMPTY;
            } catch (IOException e) {
-                logger.warn("{} Can't read metadata from store, will not reuse any local file while restoring", e, shardId);
+                logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} Can't read metadata from store, will not reuse any local file while restoring", shardId), e);
                recoveryTargetMetadata = Store.MetadataSnapshot.EMPTY;
            }
@@ -1541,7 +1544,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
                    maybeRecalculateMetadataHash(blobContainer, fileInfo, recoveryTargetMetadata);
                } catch (Exception e) {
                    // if the index is broken we might not be able to read it
-                    logger.warn("{} Can't calculate hash from blog for file [{}] [{}]", e, shardId, fileInfo.physicalName(), fileInfo.metadata());
+                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} Can't calculate hash from blob for file [{}] [{}]", shardId, fileInfo.physicalName(), fileInfo.metadata()), e);
                }
                snapshotMetaData.put(fileInfo.metadata().name(), fileInfo.metadata());
                fileInfos.put(fileInfo.metadata().name(), fileInfo);
diff --git a/core/src/main/java/org/elasticsearch/rest/BytesRestResponse.java b/core/src/main/java/org/elasticsearch/rest/BytesRestResponse.java
index fcf79962b6b..7af8249bf2e 100644
--- a/core/src/main/java/org/elasticsearch/rest/BytesRestResponse.java
+++ b/core/src/main/java/org/elasticsearch/rest/BytesRestResponse.java
@@ -19,11 +19,13 @@
 package org.elasticsearch.rest;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.logging.ESLoggerFactory;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -111,7 +113,7 @@ public class BytesRestResponse extends RestResponse {
         return this.status;
     }
-    private static final ESLogger SUPPRESSED_ERROR_LOGGER = ESLoggerFactory.getLogger("rest.suppressed");
+    private static final Logger SUPPRESSED_ERROR_LOGGER = ESLoggerFactory.getLogger("rest.suppressed");
     private static XContentBuilder convert(RestChannel channel, RestStatus status, Exception e) throws IOException {
         XContentBuilder builder = channel.newErrorBuilder().startObject();
@@ -123,9 +125,9 @@ public class BytesRestResponse extends RestResponse {
             params = new ToXContent.DelegatingMapParams(Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE, "false"), channel.request());
         } else {
             if (status.getStatus() < 500) {
-
SUPPRESSED_ERROR_LOGGER.debug("path: {}, params: {}", e, channel.request().rawPath(), channel.request().params()); + SUPPRESSED_ERROR_LOGGER.debug((Supplier) () -> new ParameterizedMessage("path: {}, params: {}", channel.request().rawPath(), channel.request().params()), e); } else { - SUPPRESSED_ERROR_LOGGER.warn("path: {}, params: {}", e, channel.request().rawPath(), channel.request().params()); + SUPPRESSED_ERROR_LOGGER.warn((Supplier) () -> new ParameterizedMessage("path: {}, params: {}", channel.request().rawPath(), channel.request().params()), e); } params = channel.request(); } diff --git a/core/src/main/java/org/elasticsearch/rest/RestController.java b/core/src/main/java/org/elasticsearch/rest/RestController.java index d5ba350ff46..e63f35884e8 100644 --- a/core/src/main/java/org/elasticsearch/rest/RestController.java +++ b/core/src/main/java/org/elasticsearch/rest/RestController.java @@ -19,6 +19,8 @@ package org.elasticsearch.rest; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesArray; @@ -28,17 +30,12 @@ import org.elasticsearch.common.path.PathTrie; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.plugins.ActionPlugin; import java.io.IOException; import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.HashSet; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; -import static java.util.Collections.unmodifiableSet; import static org.elasticsearch.rest.RestStatus.BAD_REQUEST; import static org.elasticsearch.rest.RestStatus.OK; @@ -213,7 +210,7 @@ public class RestController extends AbstractLifecycleComponent { channel.sendResponse(new BytesRestResponse(channel, e)); } catch (Exception inner) { inner.addSuppressed(e); - logger.error("failed to send failure response for uri [{}]", inner, request.uri()); + logger.error((Supplier) () -> new ParameterizedMessage("failed to send failure response for uri [{}]", request.uri()), inner); } } @@ -315,7 +312,7 @@ public class RestController extends AbstractLifecycleComponent { try { channel.sendResponse(new BytesRestResponse(channel, e)); } catch (IOException e1) { - logger.error("Failed to send failure response for uri [{}]", e1, request.uri()); + logger.error((Supplier) () -> new ParameterizedMessage("Failed to send failure response for uri [{}]", request.uri()), e1); } } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/RestActionListener.java b/core/src/main/java/org/elasticsearch/rest/action/RestActionListener.java index 5074a120791..572da497c1f 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/RestActionListener.java +++ b/core/src/main/java/org/elasticsearch/rest/action/RestActionListener.java @@ -19,8 +19,8 @@ package org.elasticsearch.rest.action; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestChannel; @@ -33,7 +33,7 @@ public abstract class RestActionListener implements ActionListener params) { + public Script(String script, ScriptType type, String lang, @Nullable Map params) { 
this(script, type, lang, params, null); } @@ -78,14 +79,14 @@ public final class Script implements ToXContent, Writeable { * when serializing the script back to xcontent. */ @SuppressWarnings("unchecked") - public Script(String script, ScriptType type, @Nullable String lang, @Nullable Map params, + public Script(String script, ScriptType type, String lang, @Nullable Map params, @Nullable XContentType contentType) { if (contentType != null && type != ScriptType.INLINE) { throw new IllegalArgumentException("The parameter contentType only makes sense for inline scripts"); } this.script = Objects.requireNonNull(script); this.type = Objects.requireNonNull(type); - this.lang = lang; + this.lang = lang == null ? DEFAULT_SCRIPT_LANG : lang; this.params = (Map) params; this.contentType = contentType; } @@ -135,7 +136,7 @@ public final class Script implements ToXContent, Writeable { * @return The type of script -- inline, stored, or file. */ public ScriptType getType() { - return type == null ? DEFAULT_TYPE : type; + return type; } /** @@ -196,7 +197,7 @@ public final class Script implements ToXContent, Writeable { token = parser.nextToken(); } if (token == XContentParser.Token.VALUE_STRING) { - return new Script(parser.text()); + return new Script(parser.text(), ScriptType.INLINE, lang, null); } if (token != XContentParser.Token.START_OBJECT) { throw new ElasticsearchParseException("expected a string value or an object, but found [{}] instead", token); diff --git a/core/src/main/java/org/elasticsearch/script/ScriptService.java b/core/src/main/java/org/elasticsearch/script/ScriptService.java index 793c87077ec..9e61f39378e 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptService.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptService.java @@ -19,6 +19,8 @@ package org.elasticsearch.script; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; @@ -90,8 +92,6 @@ public class ScriptService extends AbstractComponent implements Closeable, Clust public static final Setting SCRIPT_MAX_COMPILATIONS_PER_MINUTE = Setting.intSetting("script.max_compilations_per_minute", 15, 0, Property.Dynamic, Property.NodeScope); - private final String defaultLang; - private final Collection scriptEngines; private final Map scriptEnginesByLang; private final Map scriptEnginesByExt; @@ -129,8 +129,6 @@ public class ScriptService extends AbstractComponent implements Closeable, Clust this.scriptContextRegistry = scriptContextRegistry; int cacheMaxSize = SCRIPT_CACHE_SIZE_SETTING.get(settings); - this.defaultLang = scriptSettings.getDefaultScriptLanguageSetting().get(settings); - CacheBuilder cacheBuilder = CacheBuilder.builder(); if (cacheMaxSize >= 0) { cacheBuilder.setMaximumWeight(cacheMaxSize); @@ -220,11 +218,6 @@ public class ScriptService extends AbstractComponent implements Closeable, Clust } String lang = script.getLang(); - - if (lang == null) { - lang = defaultLang; - } - ScriptEngineService scriptEngineService = getScriptEngineServiceForLang(lang); if (canExecuteScript(lang, script.getType(), scriptContext) == false) { throw new IllegalStateException("scripts of type [" + script.getType() + "], operation [" + scriptContext.getKey() + "] and lang [" + lang + "] are disabled"); @@ -283,7 +276,7 @@ public class ScriptService extends AbstractComponent implements Closeable, Clust throw new 
IllegalArgumentException("The parameter script (Script) must not be null."); } - String lang = script.getLang() == null ? defaultLang : script.getLang(); + String lang = script.getLang(); ScriptType type = script.getType(); //script.getScript() could return either a name or code for a script, //but we check for a file script name first and an indexed script name second @@ -362,9 +355,8 @@ public class ScriptService extends AbstractComponent implements Closeable, Clust } private String validateScriptLanguage(String scriptLang) { - if (scriptLang == null) { - scriptLang = defaultLang; - } else if (scriptEnginesByLang.containsKey(scriptLang) == false) { + Objects.requireNonNull(scriptLang); + if (scriptEnginesByLang.containsKey(scriptLang) == false) { throw new IllegalArgumentException("script_lang not supported [" + scriptLang + "]"); } return scriptLang; @@ -527,8 +519,7 @@ public class ScriptService extends AbstractComponent implements Closeable, Clust "Limit of script size in bytes [{}] has been exceeded for script [{}] with size [{}]", allowedScriptSizeInBytes, identifier, - scriptSizeInBytes - ); + scriptSizeInBytes); throw new IllegalArgumentException(message); } } @@ -605,7 +596,7 @@ public class ScriptService extends AbstractComponent implements Closeable, Clust logger.warn("skipping compile of script file [{}] as all scripted operations are disabled for file scripts", file.toAbsolutePath()); } } catch (Exception e) { - logger.warn("failed to load/compile script [{}]", e, scriptNameExt.v1()); + logger.warn((Supplier) () -> new ParameterizedMessage("failed to load/compile script [{}]", scriptNameExt.v1()), e); } } } diff --git a/core/src/main/java/org/elasticsearch/script/ScriptSettings.java b/core/src/main/java/org/elasticsearch/script/ScriptSettings.java index 9c98b8d1e8c..1cb2b356245 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptSettings.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptSettings.java @@ -32,7 +32,16 @@ import java.util.function.Function; public class ScriptSettings { - public static final String DEFAULT_LANG = "painless"; + static final String LEGACY_DEFAULT_LANG = "groovy"; + + /** + * The default script language to use for scripts that are stored in documents that have no script lang set explicitly. + * This setting is legacy setting and only applies for indices created on ES versions prior to version 5.0 + * + * This constant will be removed in the next major release. 
+     */
+    @Deprecated
+    public static final String LEGACY_SCRIPT_SETTING = "script.legacy.default_lang";
     private static final Map<ScriptService.ScriptType, Setting<Boolean>> SCRIPT_TYPE_SETTING_MAP;
@@ -49,7 +58,7 @@ public class ScriptSettings {
     private final Map<ScriptContext, Setting<Boolean>> scriptContextSettingMap;
     private final List<Setting<Boolean>> scriptLanguageSettings;
-    private final Setting<String> defaultScriptLanguageSetting;
+    private final Setting<String> defaultLegacyScriptLanguageSetting;
     public ScriptSettings(ScriptEngineRegistry scriptEngineRegistry, ScriptContextRegistry scriptContextRegistry) {
         Map<ScriptContext, Setting<Boolean>> scriptContextSettingMap = contextSettings(scriptContextRegistry);
@@ -58,8 +67,8 @@ public class ScriptSettings {
         List<Setting<Boolean>> scriptLanguageSettings = languageSettings(SCRIPT_TYPE_SETTING_MAP, scriptContextSettingMap, scriptEngineRegistry, scriptContextRegistry);
         this.scriptLanguageSettings = Collections.unmodifiableList(scriptLanguageSettings);
-        this.defaultScriptLanguageSetting = new Setting<>("script.default_lang", DEFAULT_LANG, setting -> {
-            if (!DEFAULT_LANG.equals(setting) && !scriptEngineRegistry.getRegisteredLanguages().containsKey(setting)) {
+        this.defaultLegacyScriptLanguageSetting = new Setting<>(LEGACY_SCRIPT_SETTING, LEGACY_DEFAULT_LANG, setting -> {
+            if (!LEGACY_DEFAULT_LANG.equals(setting) && !scriptEngineRegistry.getRegisteredLanguages().containsKey(setting)) {
                 throw new IllegalArgumentException("unregistered default language [" + setting + "]");
             }
             return setting;
@@ -160,7 +169,7 @@ public class ScriptSettings {
         settings.addAll(SCRIPT_TYPE_SETTING_MAP.values());
         settings.addAll(scriptContextSettingMap.values());
         settings.addAll(scriptLanguageSettings);
-        settings.add(defaultScriptLanguageSetting);
+        settings.add(defaultLegacyScriptLanguageSetting);
         return settings;
     }
@@ -168,7 +177,11 @@ public class ScriptSettings {
         return scriptLanguageSettings;
     }
-    public Setting<String> getDefaultScriptLanguageSetting() {
-        return defaultScriptLanguageSetting;
+    public Setting<String> getDefaultLegacyScriptLanguageSetting() {
+        return defaultLegacyScriptLanguageSetting;
+    }
+
+    public static String getLegacyDefaultLang(Settings settings) {
+        return settings.get(LEGACY_SCRIPT_SETTING, ScriptSettings.LEGACY_DEFAULT_LANG);
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParser.java
index 1ae31e09ba0..e669ee8b9d9 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParser.java
@@ -19,11 +19,11 @@
 package org.elasticsearch.search.aggregations.bucket.geogrid;
 import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentParser.Token;
 import org.elasticsearch.index.query.GeoBoundingBoxQueryBuilder;
 import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.GeoPointValuesSourceParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
 import org.elasticsearch.search.aggregations.support.ValueType;
 import org.elasticsearch.search.aggregations.support.ValuesSourceType;
@@ -65,16 +65,17 @@ public class GeoHashGridParser extends GeoPointValuesSourceParser {
     }
     @Override
-    protected boolean token(String aggregationName, String currentFieldName, Token token, XContentParser parser,
-            ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions)
throws IOException { + protected boolean token(String aggregationName, String currentFieldName, Token token, + XContentParseContext context, Map otherOptions) throws IOException { + XContentParser parser = context.getParser(); if (token == XContentParser.Token.VALUE_NUMBER || token == XContentParser.Token.VALUE_STRING) { - if (parseFieldMatcher.match(currentFieldName, GeoHashGridParams.FIELD_PRECISION)) { + if (context.matchField(currentFieldName, GeoHashGridParams.FIELD_PRECISION)) { otherOptions.put(GeoHashGridParams.FIELD_PRECISION, parser.intValue()); return true; - } else if (parseFieldMatcher.match(currentFieldName, GeoHashGridParams.FIELD_SIZE)) { + } else if (context.matchField(currentFieldName, GeoHashGridParams.FIELD_SIZE)) { otherOptions.put(GeoHashGridParams.FIELD_SIZE, parser.intValue()); return true; - } else if (parseFieldMatcher.match(currentFieldName, GeoHashGridParams.FIELD_SHARD_SIZE)) { + } else if (context.matchField(currentFieldName, GeoHashGridParams.FIELD_SHARD_SIZE)) { otherOptions.put(GeoHashGridParams.FIELD_SHARD_SIZE, parser.intValue()); return true; } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java index e3a3ea75762..952a0e2568f 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java @@ -19,11 +19,11 @@ package org.elasticsearch.search.aggregations.bucket.histogram; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.NumericValuesSourceParser; +import org.elasticsearch.search.aggregations.support.XContentParseContext; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSourceType; @@ -79,10 +79,11 @@ public class DateHistogramParser extends NumericValuesSourceParser { } @Override - protected boolean token(String aggregationName, String currentFieldName, Token token, XContentParser parser, - ParseFieldMatcher parseFieldMatcher, Map otherOptions) throws IOException { + protected boolean token(String aggregationName, String currentFieldName, Token token, + XContentParseContext context, Map otherOptions) throws IOException { + XContentParser parser = context.getParser(); if (token.isValue()) { - if (parseFieldMatcher.match(currentFieldName, Histogram.INTERVAL_FIELD)) { + if (context.matchField(currentFieldName, Histogram.INTERVAL_FIELD)) { if (token == XContentParser.Token.VALUE_STRING) { otherOptions.put(Histogram.INTERVAL_FIELD, new DateHistogramInterval(parser.text())); return true; @@ -90,13 +91,13 @@ public class DateHistogramParser extends NumericValuesSourceParser { otherOptions.put(Histogram.INTERVAL_FIELD, parser.longValue()); return true; } - } else if (parseFieldMatcher.match(currentFieldName, Histogram.MIN_DOC_COUNT_FIELD)) { + } else if (context.matchField(currentFieldName, Histogram.MIN_DOC_COUNT_FIELD)) { otherOptions.put(Histogram.MIN_DOC_COUNT_FIELD, parser.longValue()); return true; - } else if (parseFieldMatcher.match(currentFieldName, Histogram.KEYED_FIELD)) { + } 
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java
index e3a3ea75762..952a0e2568f 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java
@@ -19,11 +19,11 @@
 package org.elasticsearch.search.aggregations.bucket.histogram;
 
 import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.ParsingException;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentParser.Token;
 import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.NumericValuesSourceParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
 import org.elasticsearch.search.aggregations.support.ValueType;
 import org.elasticsearch.search.aggregations.support.ValuesSourceType;
 
@@ -79,10 +79,11 @@ public class DateHistogramParser extends NumericValuesSourceParser {
     }
 
     @Override
-    protected boolean token(String aggregationName, String currentFieldName, Token token, XContentParser parser,
-            ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
+    protected boolean token(String aggregationName, String currentFieldName, Token token,
+            XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
+        XContentParser parser = context.getParser();
         if (token.isValue()) {
-            if (parseFieldMatcher.match(currentFieldName, Histogram.INTERVAL_FIELD)) {
+            if (context.matchField(currentFieldName, Histogram.INTERVAL_FIELD)) {
                 if (token == XContentParser.Token.VALUE_STRING) {
                     otherOptions.put(Histogram.INTERVAL_FIELD, new DateHistogramInterval(parser.text()));
                     return true;
@@ -90,13 +91,13 @@ public class DateHistogramParser extends NumericValuesSourceParser {
                     otherOptions.put(Histogram.INTERVAL_FIELD, parser.longValue());
                     return true;
                 }
-            } else if (parseFieldMatcher.match(currentFieldName, Histogram.MIN_DOC_COUNT_FIELD)) {
+            } else if (context.matchField(currentFieldName, Histogram.MIN_DOC_COUNT_FIELD)) {
                 otherOptions.put(Histogram.MIN_DOC_COUNT_FIELD, parser.longValue());
                 return true;
-            } else if (parseFieldMatcher.match(currentFieldName, Histogram.KEYED_FIELD)) {
+            } else if (context.matchField(currentFieldName, Histogram.KEYED_FIELD)) {
                 otherOptions.put(Histogram.KEYED_FIELD, parser.booleanValue());
                 return true;
-            } else if (parseFieldMatcher.match(currentFieldName, Histogram.OFFSET_FIELD)) {
+            } else if (context.matchField(currentFieldName, Histogram.OFFSET_FIELD)) {
                 if (token == XContentParser.Token.VALUE_STRING) {
                     otherOptions.put(Histogram.OFFSET_FIELD,
                             DateHistogramAggregationBuilder.parseStringOffset(parser.text()));
@@ -109,7 +110,7 @@ public class DateHistogramParser extends NumericValuesSourceParser {
                 return false;
             }
         } else if (token == XContentParser.Token.START_OBJECT) {
-            if (parseFieldMatcher.match(currentFieldName, Histogram.ORDER_FIELD)) {
+            if (context.matchField(currentFieldName, Histogram.ORDER_FIELD)) {
                 InternalOrder order = null;
                 while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                     if (token == XContentParser.Token.FIELD_NAME) {
@@ -127,9 +128,10 @@ public class DateHistogramParser extends NumericValuesSourceParser {
                 }
                 otherOptions.put(Histogram.ORDER_FIELD, order);
                 return true;
-            } else if (parseFieldMatcher.match(currentFieldName, ExtendedBounds.EXTENDED_BOUNDS_FIELD)) {
+            } else if (context.matchField(currentFieldName, ExtendedBounds.EXTENDED_BOUNDS_FIELD)) {
                 try {
-                    otherOptions.put(ExtendedBounds.EXTENDED_BOUNDS_FIELD, ExtendedBounds.PARSER.apply(parser, () -> parseFieldMatcher));
+                    otherOptions.put(ExtendedBounds.EXTENDED_BOUNDS_FIELD,
+                            ExtendedBounds.PARSER.apply(parser, context::getParseFieldMatcher));
                 } catch (Exception e) {
                     throw new ParsingException(parser.getTokenLocation(), "Error parsing [{}]", e, aggregationName);
                 }
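Note the swap from the ad-hoc lambda () -> parseFieldMatcher to the method reference context::getParseFieldMatcher. Both satisfy ParseFieldMatcherSupplier, whose single abstract method is getParseFieldMatcher(), so the getter reference type-checks directly. A short illustration:

    // Equivalent ways to hand a parser its matcher supplier.
    ParseFieldMatcherSupplier viaLambda = () -> parseFieldMatcher;          // old style
    ParseFieldMatcherSupplier viaReference = context::getParseFieldMatcher; // new style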
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramParser.java
index 69aed3e733a..f27677a1a66 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramParser.java
@@ -19,13 +19,13 @@
 package org.elasticsearch.search.aggregations.bucket.histogram;
 
 import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.ParseFieldMatcherSupplier;
 import org.elasticsearch.common.ParsingException;
 import org.elasticsearch.common.xcontent.ObjectParser;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentParser.Token;
 import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.NumericValuesSourceParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
 import org.elasticsearch.search.aggregations.support.ValueType;
 import org.elasticsearch.search.aggregations.support.ValuesSourceType;
 
@@ -85,26 +85,27 @@ public class HistogramParser extends NumericValuesSourceParser {
     }
 
     @Override
-    protected boolean token(String aggregationName, String currentFieldName, Token token, XContentParser parser,
-            ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
+    protected boolean token(String aggregationName, String currentFieldName, Token token,
+            XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
+        XContentParser parser = context.getParser();
         if (token.isValue()) {
-            if (parseFieldMatcher.match(currentFieldName, Histogram.INTERVAL_FIELD)) {
+            if (context.matchField(currentFieldName, Histogram.INTERVAL_FIELD)) {
                 otherOptions.put(Histogram.INTERVAL_FIELD, parser.doubleValue());
                 return true;
-            } else if (parseFieldMatcher.match(currentFieldName, Histogram.MIN_DOC_COUNT_FIELD)) {
+            } else if (context.matchField(currentFieldName, Histogram.MIN_DOC_COUNT_FIELD)) {
                 otherOptions.put(Histogram.MIN_DOC_COUNT_FIELD, parser.longValue());
                 return true;
-            } else if (parseFieldMatcher.match(currentFieldName, Histogram.KEYED_FIELD)) {
+            } else if (context.matchField(currentFieldName, Histogram.KEYED_FIELD)) {
                 otherOptions.put(Histogram.KEYED_FIELD, parser.booleanValue());
                 return true;
-            } else if (parseFieldMatcher.match(currentFieldName, Histogram.OFFSET_FIELD)) {
+            } else if (context.matchField(currentFieldName, Histogram.OFFSET_FIELD)) {
                 otherOptions.put(Histogram.OFFSET_FIELD, parser.doubleValue());
                 return true;
             } else {
                 return false;
             }
         } else if (token == XContentParser.Token.START_OBJECT) {
-            if (parseFieldMatcher.match(currentFieldName, Histogram.ORDER_FIELD)) {
+            if (context.matchField(currentFieldName, Histogram.ORDER_FIELD)) {
                 InternalOrder order = null;
                 while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                     if (token == XContentParser.Token.FIELD_NAME) {
@@ -122,8 +123,8 @@ public class HistogramParser extends NumericValuesSourceParser {
                 }
                 otherOptions.put(Histogram.ORDER_FIELD, order);
                 return true;
-            } else if (parseFieldMatcher.match(currentFieldName, Histogram.EXTENDED_BOUNDS_FIELD)) {
-                double[] bounds = EXTENDED_BOUNDS_PARSER.apply(parser, () -> parseFieldMatcher);
+            } else if (context.matchField(currentFieldName, Histogram.EXTENDED_BOUNDS_FIELD)) {
+                double[] bounds = EXTENDED_BOUNDS_PARSER.apply(parser, context::getParseFieldMatcher);
                 otherOptions.put(Histogram.EXTENDED_BOUNDS_FIELD, bounds);
                 return true;
             } else {
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingParser.java
index fff81db1301..5d6844ebbd2 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingParser.java
@@ -19,9 +19,9 @@
 package org.elasticsearch.search.aggregations.bucket.missing;
 
 import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.AnyValuesSourceParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
 import org.elasticsearch.search.aggregations.support.ValueType;
 import org.elasticsearch.search.aggregations.support.ValuesSourceType;
 
@@ -35,8 +35,8 @@ public class MissingParser extends AnyValuesSourceParser {
     }
 
     @Override
-    protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token, XContentParser parser,
-            ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
+    protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token,
+            XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
         return false;
     }
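MissingParser is the degenerate case of the template method: it contributes no options of its own, returns false, and lets the shared AbstractValuesSourceParser machinery handle the common fields. For contrast, a hypothetical minimal parser with one extra option would look like this under the new signature (FooParser and FOO_FIELD are invented names; constructor omitted):

    public class FooParser extends AnyValuesSourceParser {
        private static final ParseField FOO_FIELD = new ParseField("foo");

        @Override
        protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token,
                XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
            if (token.isValue() && context.matchField(currentFieldName, FOO_FIELD)) {
                otherOptions.put(FOO_FIELD, context.getParser().intValue());
                return true;  // field consumed
            }
            return false;     // unknown here; the caller decides what to do with it
        }
    }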
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeParser.java
index 6ebd413d2d4..c8cb2c76715 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeParser.java
@@ -24,6 +24,7 @@ import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentParser.Token;
 import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator.Range;
 import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.NumericValuesSourceParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
 import org.elasticsearch.search.aggregations.support.ValueType;
 import org.elasticsearch.search.aggregations.support.ValuesSourceType;
 
@@ -65,20 +66,21 @@ public class RangeParser extends NumericValuesSourceParser {
     }
 
     @Override
-    protected boolean token(String aggregationName, String currentFieldName, Token token, XContentParser parser,
-            ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
+    protected boolean token(String aggregationName, String currentFieldName, Token token,
+            XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
+        XContentParser parser = context.getParser();
         if (token == XContentParser.Token.START_ARRAY) {
-            if (parseFieldMatcher.match(currentFieldName, RangeAggregator.RANGES_FIELD)) {
+            if (context.matchField(currentFieldName, RangeAggregator.RANGES_FIELD)) {
                 List<Range> ranges = new ArrayList<>();
                 while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
-                    Range range = parseRange(parser, parseFieldMatcher);
+                    Range range = parseRange(parser, context.getParseFieldMatcher());
                     ranges.add(range);
                 }
                 otherOptions.put(RangeAggregator.RANGES_FIELD, ranges);
                 return true;
             }
         } else if (token == XContentParser.Token.VALUE_BOOLEAN) {
-            if (parseFieldMatcher.match(currentFieldName, RangeAggregator.KEYED_FIELD)) {
+            if (context.matchField(currentFieldName, RangeAggregator.KEYED_FIELD)) {
                 boolean keyed = parser.booleanValue();
                 otherOptions.put(RangeAggregator.KEYED_FIELD, keyed);
                 return true;
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceParser.java
index 16cb909ea0f..677731d64ef 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceParser.java
@@ -19,7 +19,6 @@
 package org.elasticsearch.search.aggregations.bucket.range.geodistance;
 
 import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.geo.GeoDistance;
 import org.elasticsearch.common.geo.GeoPoint;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -30,6 +29,7 @@ import org.elasticsearch.common.xcontent.XContentParser.Token;
 import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator;
 import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.GeoPointValuesSourceParser;
 import org.elasticsearch.search.aggregations.support.GeoPointParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
 import org.elasticsearch.search.aggregations.support.ValueType;
 import org.elasticsearch.search.aggregations.support.ValuesSourceType;
 
@@ -110,28 +110,29 @@ public class GeoDistanceParser extends GeoPointValuesSourceParser {
     }
 
     @Override
-    protected boolean token(String aggregationName, String currentFieldName, Token token, XContentParser parser,
-            ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
-        if (geoPointParser.token(aggregationName, currentFieldName, token, parser, parseFieldMatcher, otherOptions)) {
+    protected boolean token(String aggregationName, String currentFieldName, Token token,
+            XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
+        XContentParser parser = context.getParser();
+        if (geoPointParser.token(aggregationName, currentFieldName, token, parser, context.getParseFieldMatcher(), otherOptions)) {
             return true;
         } else if (token == XContentParser.Token.VALUE_STRING) {
-            if (parseFieldMatcher.match(currentFieldName, UNIT_FIELD)) {
+            if (context.matchField(currentFieldName, UNIT_FIELD)) {
                 DistanceUnit unit = DistanceUnit.fromString(parser.text());
                 otherOptions.put(UNIT_FIELD, unit);
                 return true;
-            } else if (parseFieldMatcher.match(currentFieldName, DISTANCE_TYPE_FIELD)) {
+            } else if (context.matchField(currentFieldName, DISTANCE_TYPE_FIELD)) {
                 GeoDistance distanceType = GeoDistance.fromString(parser.text());
                 otherOptions.put(DISTANCE_TYPE_FIELD, distanceType);
                 return true;
             }
         } else if (token == XContentParser.Token.VALUE_BOOLEAN) {
-            if (parseFieldMatcher.match(currentFieldName, RangeAggregator.KEYED_FIELD)) {
+            if (context.matchField(currentFieldName, RangeAggregator.KEYED_FIELD)) {
                 boolean keyed = parser.booleanValue();
                 otherOptions.put(RangeAggregator.KEYED_FIELD, keyed);
                 return true;
             }
         } else if (token == XContentParser.Token.START_ARRAY) {
-            if (parseFieldMatcher.match(currentFieldName, RangeAggregator.RANGES_FIELD)) {
+            if (context.matchField(currentFieldName, RangeAggregator.RANGES_FIELD)) {
                 List<Range> ranges = new ArrayList<>();
                 while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                     String fromAsStr = null;
@@ -144,17 +145,17 @@ public class GeoDistanceParser extends GeoPointValuesSourceParser {
                     if (token == XContentParser.Token.FIELD_NAME) {
                         toOrFromOrKey = parser.currentName();
                     } else if (token == XContentParser.Token.VALUE_NUMBER) {
-                        if (parseFieldMatcher.match(toOrFromOrKey, Range.FROM_FIELD)) {
+                        if (context.matchField(toOrFromOrKey, Range.FROM_FIELD)) {
                             from = parser.doubleValue();
-                        } else if (parseFieldMatcher.match(toOrFromOrKey, Range.TO_FIELD)) {
+                        } else if (context.matchField(toOrFromOrKey, Range.TO_FIELD)) {
                             to = parser.doubleValue();
                         }
                     } else if (token == XContentParser.Token.VALUE_STRING) {
-                        if (parseFieldMatcher.match(toOrFromOrKey, Range.KEY_FIELD)) {
+                        if (context.matchField(toOrFromOrKey, Range.KEY_FIELD)) {
                             key = parser.text();
-                        } else if (parseFieldMatcher.match(toOrFromOrKey, Range.FROM_FIELD)) {
+                        } else if (context.matchField(toOrFromOrKey, Range.FROM_FIELD)) {
                             fromAsStr = parser.text();
-                        } else if (parseFieldMatcher.match(toOrFromOrKey, Range.TO_FIELD)) {
+                        } else if (context.matchField(toOrFromOrKey, Range.TO_FIELD)) {
                             toAsStr = parser.text();
                         }
                     }
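The ranges block above is the canonical XContent array-reading idiom: advance with nextToken() until END_ARRAY, and inside each object dispatch on the token type. Stripped of aggregation specifics, the pattern is (field names illustrative):

    // Reads: "ranges": [ { "from": 0, "to": 10 }, ... ]
    List<double[]> ranges = new ArrayList<>();
    XContentParser.Token token;
    while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
        double from = Double.NEGATIVE_INFINITY, to = Double.POSITIVE_INFINITY;
        String field = null;
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
                field = parser.currentName();
            } else if (token == XContentParser.Token.VALUE_NUMBER) {
                if ("from".equals(field)) {
                    from = parser.doubleValue();
                } else if ("to".equals(field)) {
                    to = parser.doubleValue();
                }
            }
        }
        ranges.add(new double[] { from, to });
    }

Also worth noting: geoPointParser.token(...) above still takes the unbundled (parser, matcher) pair, so GeoPointParser keeps its legacy signature in this change.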
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeParser.java
index 8445fb2d459..5d95f0dd494 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeParser.java
@@ -31,6 +31,7 @@ import org.elasticsearch.common.xcontent.XContentParser.Token;
 import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.BytesValuesSourceParser;
 import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator;
 import org.elasticsearch.search.aggregations.bucket.range.ip.IpRangeAggregationBuilder.Range;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
 import org.elasticsearch.search.aggregations.support.ValueType;
 import org.elasticsearch.search.aggregations.support.ValuesSource;
 import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
 
@@ -102,21 +103,22 @@ public class IpRangeParser extends BytesValuesSourceParser {
 
     @Override
     protected boolean token(String aggregationName, String currentFieldName,
-            Token token, XContentParser parser,
-            ParseFieldMatcher parseFieldMatcher,
-            Map<ParseField, Object> otherOptions) throws IOException {
-        if (parseFieldMatcher.match(currentFieldName, RangeAggregator.RANGES_FIELD)) {
+            Token token,
+            XContentParseContext context,
+            Map<ParseField, Object> otherOptions) throws IOException {
+        XContentParser parser = context.getParser();
+        if (context.matchField(currentFieldName, RangeAggregator.RANGES_FIELD)) {
             if (parser.currentToken() != Token.START_ARRAY) {
                 throw new ParsingException(parser.getTokenLocation(),
                         "[ranges] must be passed as an array, but got a " + token);
             }
             List<Range> ranges = new ArrayList<>();
             while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
-                Range range = parseRange(parser, parseFieldMatcher);
+                Range range = parseRange(parser, context.getParseFieldMatcher());
                 ranges.add(range);
             }
             otherOptions.put(RangeAggregator.RANGES_FIELD, ranges);
             return true;
-        } else if (parseFieldMatcher.match(parser.currentName(), RangeAggregator.KEYED_FIELD)) {
+        } else if (context.matchField(parser.currentName(), RangeAggregator.KEYED_FIELD)) {
             otherOptions.put(RangeAggregator.KEYED_FIELD, parser.booleanValue());
             return true;
         }
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedSamplerParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedSamplerParser.java
index f495071f6d2..a62035d7234 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedSamplerParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedSamplerParser.java
@@ -20,9 +20,9 @@
 package org.elasticsearch.search.aggregations.bucket.sampler;
 
 import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.AnyValuesSourceParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
 import org.elasticsearch.search.aggregations.support.ValueType;
 import org.elasticsearch.search.aggregations.support.ValuesSourceType;
 
@@ -57,20 +57,21 @@ public class DiversifiedSamplerParser extends AnyValuesSourceParser {
     }
 
     @Override
-    protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token, XContentParser parser,
-            ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
+    protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token,
+            XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
+        XContentParser parser = context.getParser();
         if (token == XContentParser.Token.VALUE_NUMBER) {
-            if (parseFieldMatcher.match(currentFieldName, SamplerAggregator.SHARD_SIZE_FIELD)) {
+            if (context.matchField(currentFieldName, SamplerAggregator.SHARD_SIZE_FIELD)) {
                 int shardSize = parser.intValue();
                 otherOptions.put(SamplerAggregator.SHARD_SIZE_FIELD, shardSize);
                 return true;
-            } else if (parseFieldMatcher.match(currentFieldName, SamplerAggregator.MAX_DOCS_PER_VALUE_FIELD)) {
+            } else if (context.matchField(currentFieldName, SamplerAggregator.MAX_DOCS_PER_VALUE_FIELD)) {
                 int maxDocsPerValue = parser.intValue();
                 otherOptions.put(SamplerAggregator.MAX_DOCS_PER_VALUE_FIELD, maxDocsPerValue);
                 return true;
             }
         } else if (token == XContentParser.Token.VALUE_STRING) {
-            if (parseFieldMatcher.match(currentFieldName, SamplerAggregator.EXECUTION_HINT_FIELD)) {
+            if (context.matchField(currentFieldName, SamplerAggregator.EXECUTION_HINT_FIELD)) {
                 String executionHint = parser.text();
                 otherOptions.put(SamplerAggregator.EXECUTION_HINT_FIELD, executionHint);
                 return true;
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsParser.java
index ba87f0917a0..0f08cf0a0a3 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsParser.java
@@ -19,7 +19,6 @@
 package org.elasticsearch.search.aggregations.bucket.significant;
 
 import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.xcontent.ParseFieldRegistry;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentParser.Token;
@@ -33,6 +32,7 @@ import org.elasticsearch.search.aggregations.bucket.terms.AbstractTermsParser;
 import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator;
 import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator.BucketCountThresholds;
 import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
 import org.elasticsearch.search.aggregations.support.ValueType;
 import org.elasticsearch.search.aggregations.support.ValuesSourceType;
 
@@ -81,17 +81,18 @@ public class SignificantTermsParser extends AbstractTermsParser {
     }
 
     @Override
-    public boolean parseSpecial(String aggregationName, XContentParser parser, ParseFieldMatcher parseFieldMatcher, Token token,
-            String currentFieldName, Map<ParseField, Object> otherOptions) throws IOException {
+    public boolean parseSpecial(String aggregationName, XContentParseContext context, Token token,
+            String currentFieldName, Map<ParseField, Object> otherOptions) throws IOException {
         if (token == XContentParser.Token.START_OBJECT) {
             SignificanceHeuristicParser significanceHeuristicParser = significanceHeuristicParserRegistry
-                    .lookupReturningNullIfNotFound(currentFieldName, parseFieldMatcher);
+                    .lookupReturningNullIfNotFound(currentFieldName, context.getParseFieldMatcher());
             if (significanceHeuristicParser != null) {
-                SignificanceHeuristic significanceHeuristic = significanceHeuristicParser.parse(parser, parseFieldMatcher);
+                SignificanceHeuristic significanceHeuristic = significanceHeuristicParser.parse(context);
                 otherOptions.put(SignificantTermsAggregationBuilder.HEURISTIC, significanceHeuristic);
                 return true;
-            } else if (parseFieldMatcher.match(currentFieldName, SignificantTermsAggregationBuilder.BACKGROUND_FILTER)) {
-                QueryParseContext queryParseContext = new QueryParseContext(queriesRegistry, parser, parseFieldMatcher);
+            } else if (context.matchField(currentFieldName, SignificantTermsAggregationBuilder.BACKGROUND_FILTER)) {
+                QueryParseContext queryParseContext = new QueryParseContext(context.getDefaultScriptLanguage(), queriesRegistry,
+                        context.getParser(), context.getParseFieldMatcher());
                 Optional<QueryBuilder> filter = queryParseContext.parseInnerQueryBuilder();
                 if (filter.isPresent()) {
                     otherOptions.put(SignificantTermsAggregationBuilder.BACKGROUND_FILTER, filter.get());
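The background_filter branch is where the context's third field earns its keep: QueryParseContext now takes the default script language as its first constructor argument, so scripts nested inside the filter inherit it. A hedged end-to-end sketch of how the pieces of this PR plug together (variable names are illustrative):

    // 1. Resolve the node-wide legacy default, if any.
    String defaultLang = ScriptSettings.getLegacyDefaultLang(nodeSettings);
    // 2. Build the per-request parse context carrying it.
    //    (Constructor shape assumed; see the XContentParseContext sketch earlier.)
    XContentParseContext context = new XContentParseContext(parser, parseFieldMatcher, defaultLang);
    // 3. Nested query parsing then picks the language up automatically.
    QueryParseContext queryParseContext = new QueryParseContext(context.getDefaultScriptLanguage(),
            queriesRegistry, context.getParser(), context.getParseFieldMatcher());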
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/GND.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/GND.java
index d8610dc05c8..3ae26639aa9 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/GND.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/GND.java
@@ -22,12 +22,12 @@
 package org.elasticsearch.search.aggregations.bucket.significant.heuristics;
 
 import org.elasticsearch.ElasticsearchParseException;
-import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.query.QueryShardException;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
 
 import java.io.IOException;
 
@@ -113,13 +113,13 @@ public class GND extends NXYSignificanceHeuristic {
         }
 
         @Override
-        public SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher)
-                throws IOException, QueryShardException {
+        public SignificanceHeuristic parse(XContentParseContext context) throws IOException, QueryShardException {
+            XContentParser parser = context.getParser();
             String givenName = parser.currentName();
             boolean backgroundIsSuperset = true;
             XContentParser.Token token = parser.nextToken();
             while (!token.equals(XContentParser.Token.END_OBJECT)) {
-                if (parseFieldMatcher.match(parser.currentName(), BACKGROUND_IS_SUPERSET)) {
+                if (context.matchField(parser.currentName(), BACKGROUND_IS_SUPERSET)) {
                     parser.nextToken();
                     backgroundIsSuperset = parser.booleanValue();
                 } else {
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/JLHScore.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/JLHScore.java
index d426b146620..58f8060a108 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/JLHScore.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/JLHScore.java
@@ -22,12 +22,12 @@
 package org.elasticsearch.search.aggregations.bucket.significant.heuristics;
 
 import org.elasticsearch.ElasticsearchParseException;
-import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.query.QueryShardException;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
 
 import java.io.IOException;
 
@@ -104,8 +104,9 @@ public class JLHScore extends SignificanceHeuristic {
         return builder;
     }
 
-    public static SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher)
+    public static SignificanceHeuristic parse(XContentParseContext context)
             throws IOException, QueryShardException {
+        XContentParser parser = context.getParser();
         // move to the closing bracket
         if (!parser.nextToken().equals(XContentParser.Token.END_OBJECT)) {
             throw new ElasticsearchParseException(
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/NXYSignificanceHeuristic.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/NXYSignificanceHeuristic.java
index 3036c57865d..d6064ca37fd 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/NXYSignificanceHeuristic.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/NXYSignificanceHeuristic.java
@@ -23,12 +23,12 @@
 package org.elasticsearch.search.aggregations.bucket.significant.heuristics;
 
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.query.QueryShardException;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
 
 import java.io.IOException;
 
@@ -152,17 +152,18 @@ public abstract class NXYSignificanceHeuristic extends SignificanceHeuristic {
 
     public abstract static class NXYParser implements SignificanceHeuristicParser {
 
         @Override
-        public SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher)
+        public SignificanceHeuristic parse(XContentParseContext context)
                 throws IOException, QueryShardException {
+            XContentParser parser = context.getParser();
             String givenName = parser.currentName();
             boolean includeNegatives = false;
             boolean backgroundIsSuperset = true;
             XContentParser.Token token = parser.nextToken();
             while (!token.equals(XContentParser.Token.END_OBJECT)) {
-                if (parseFieldMatcher.match(parser.currentName(), INCLUDE_NEGATIVES_FIELD)) {
+                if (context.matchField(parser.currentName(), INCLUDE_NEGATIVES_FIELD)) {
                     parser.nextToken();
                     includeNegatives = parser.booleanValue();
-                } else if (parseFieldMatcher.match(parser.currentName(), BACKGROUND_IS_SUPERSET)) {
+                } else if (context.matchField(parser.currentName(), BACKGROUND_IS_SUPERSET)) {
                     parser.nextToken();
                     backgroundIsSuperset = parser.booleanValue();
                 } else {
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/PercentageScore.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/PercentageScore.java
index 7bc117a0ec8..c7e5c7ead6f 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/PercentageScore.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/PercentageScore.java
@@ -22,12 +22,12 @@
 package org.elasticsearch.search.aggregations.bucket.significant.heuristics;
 
 import org.elasticsearch.ElasticsearchParseException;
-import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.query.QueryShardException;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
 
 import java.io.IOException;
 
@@ -56,8 +56,9 @@ public class PercentageScore extends SignificanceHeuristic {
         return builder;
     }
 
-    public static SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher)
+    public static SignificanceHeuristic parse(XContentParseContext context)
             throws IOException, QueryShardException {
+        XContentParser parser = context.getParser();
         // move to the closing bracket
         if (!parser.nextToken().equals(XContentParser.Token.END_OBJECT)) {
             throw new ElasticsearchParseException("failed to parse [percentage] significance heuristic. expected an empty object, but got [{}] instead", parser.currentToken());
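PercentageScore above, like JLHScore earlier, takes no options, so parse(...) only has to verify that the heuristic body is an empty object. The guard both of them use reduces to this (heuristic name illustrative):

    // Accepts  "jlh": {}   and rejects  "jlh": { "anything": ... }
    if (!parser.nextToken().equals(XContentParser.Token.END_OBJECT)) {
        throw new ElasticsearchParseException(
                "failed to parse [jlh] significance heuristic. expected an empty object, but got [{}] instead",
                parser.currentToken());
    }
    return new JLHScore();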
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java
index 1f99ebad216..c933f9ef596 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java
@@ -22,7 +22,6 @@
 package org.elasticsearch.search.aggregations.bucket.significant.heuristics;
 
 import org.elasticsearch.ElasticsearchParseException;
-import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.logging.ESLoggerFactory;
@@ -35,6 +34,7 @@ import org.elasticsearch.script.Script.ScriptField;
 import org.elasticsearch.script.ScriptContext;
 import org.elasticsearch.script.ScriptService;
 import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
 import org.elasticsearch.search.internal.SearchContext;
 
 import java.io.IOException;
@@ -146,8 +146,9 @@ public class ScriptHeuristic extends SignificanceHeuristic {
         return Objects.equals(script, other.script);
     }
 
-    public static SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher)
+    public static SignificanceHeuristic parse(XContentParseContext context)
             throws IOException, QueryShardException {
+        XContentParser parser = context.getParser();
         String heuristicName = parser.currentName();
         Script script = null;
         XContentParser.Token token;
@@ -156,8 +157,8 @@ public class ScriptHeuristic extends SignificanceHeuristic {
             if (token.equals(XContentParser.Token.FIELD_NAME)) {
                 currentFieldName = parser.currentName();
             } else {
-                if (parseFieldMatcher.match(currentFieldName, ScriptField.SCRIPT)) {
-                    script = Script.parse(parser, parseFieldMatcher);
+                if (context.matchField(currentFieldName, ScriptField.SCRIPT)) {
+                    script = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage());
                 } else {
                     throw new ElasticsearchParseException("failed to parse [{}] significance heuristic. unknown object [{}]",
                             heuristicName, currentFieldName);
                 }
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristicParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristicParser.java
index 1e1f4bfd486..26fd552a6b1 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristicParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristicParser.java
@@ -20,9 +20,9 @@
 
 package org.elasticsearch.search.aggregations.bucket.significant.heuristics;
 
-import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.ParsingException;
 import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
 
 import java.io.IOException;
 
@@ -31,6 +31,5 @@ import java.io.IOException;
  */
 @FunctionalInterface
 public interface SignificanceHeuristicParser {
-    SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException,
-            ParsingException;
+    SignificanceHeuristic parse(XContentParseContext context) throws IOException, ParsingException;
 }
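Keeping SignificanceHeuristicParser a @FunctionalInterface while narrowing it to one argument preserves the convenience that static parse methods drop in as method references and one-off parsers as lambdas. A registration sketch; the register(...) call shape is assumed, not taken from this diff:

    SignificanceHeuristicParser viaReference = JLHScore::parse;
    SignificanceHeuristicParser viaLambda = context -> {
        context.getParser().nextToken();  // consume the empty body
        return new JLHScore();
    };
    // e.g. significanceHeuristicParserRegistry.register(viaReference, "jlh");  // hypothetical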
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractTermsParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractTermsParser.java
index 3f27c4f1c6f..a106cea3a15 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractTermsParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractTermsParser.java
@@ -20,13 +20,13 @@
 package org.elasticsearch.search.aggregations.bucket.terms;
 
 import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentParser.Token;
 import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
 import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator.BucketCountThresholds;
 import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude;
 import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.AnyValuesSourceParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
 import org.elasticsearch.search.aggregations.support.ValueType;
 import org.elasticsearch.search.aggregations.support.ValuesSource;
 import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
 
@@ -89,47 +89,48 @@ public abstract class AbstractTermsParser extends AnyValuesSourceParser {
                                                          Map<ParseField, Object> otherOptions);
 
     @Override
-    protected boolean token(String aggregationName, String currentFieldName, Token token, XContentParser parser,
-            ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
-        if (incExcParser.token(currentFieldName, token, parser, parseFieldMatcher, otherOptions)) {
+    protected boolean token(String aggregationName, String currentFieldName, Token token,
+            XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
+        XContentParser parser = context.getParser();
+        if (incExcParser.token(currentFieldName, token, parser, context.getParseFieldMatcher(), otherOptions)) {
             return true;
         } else if (token == XContentParser.Token.VALUE_STRING) {
-            if (parseFieldMatcher.match(currentFieldName, EXECUTION_HINT_FIELD_NAME)) {
+            if (context.matchField(currentFieldName, EXECUTION_HINT_FIELD_NAME)) {
                 otherOptions.put(EXECUTION_HINT_FIELD_NAME, parser.text());
                 return true;
-            } else if (parseFieldMatcher.match(currentFieldName, SubAggCollectionMode.KEY)) {
-                otherOptions.put(SubAggCollectionMode.KEY, SubAggCollectionMode.parse(parser.text(), parseFieldMatcher));
+            } else if (context.matchField(currentFieldName, SubAggCollectionMode.KEY)) {
+                otherOptions.put(SubAggCollectionMode.KEY, SubAggCollectionMode.parse(parser.text(), context.getParseFieldMatcher()));
                 return true;
-            } else if (parseFieldMatcher.match(currentFieldName, REQUIRED_SIZE_FIELD_NAME)) {
+            } else if (context.matchField(currentFieldName, REQUIRED_SIZE_FIELD_NAME)) {
                 otherOptions.put(REQUIRED_SIZE_FIELD_NAME, parser.intValue());
                 return true;
-            } else if (parseSpecial(aggregationName, parser, parseFieldMatcher, token, currentFieldName, otherOptions)) {
+            } else if (parseSpecial(aggregationName, context, token, currentFieldName, otherOptions)) {
                 return true;
             }
         } else if (token == XContentParser.Token.VALUE_NUMBER) {
-            if (parseFieldMatcher.match(currentFieldName, REQUIRED_SIZE_FIELD_NAME)) {
+            if (context.matchField(currentFieldName, REQUIRED_SIZE_FIELD_NAME)) {
                 otherOptions.put(REQUIRED_SIZE_FIELD_NAME, parser.intValue());
                 return true;
-            } else if (parseFieldMatcher.match(currentFieldName, SHARD_SIZE_FIELD_NAME)) {
+            } else if (context.matchField(currentFieldName, SHARD_SIZE_FIELD_NAME)) {
                 otherOptions.put(SHARD_SIZE_FIELD_NAME, parser.intValue());
                 return true;
-            } else if (parseFieldMatcher.match(currentFieldName, MIN_DOC_COUNT_FIELD_NAME)) {
+            } else if (context.matchField(currentFieldName, MIN_DOC_COUNT_FIELD_NAME)) {
                 otherOptions.put(MIN_DOC_COUNT_FIELD_NAME, parser.longValue());
                 return true;
-            } else if (parseFieldMatcher.match(currentFieldName, SHARD_MIN_DOC_COUNT_FIELD_NAME)) {
+            } else if (context.matchField(currentFieldName, SHARD_MIN_DOC_COUNT_FIELD_NAME)) {
                 otherOptions.put(SHARD_MIN_DOC_COUNT_FIELD_NAME, parser.longValue());
                 return true;
-            } else if (parseSpecial(aggregationName, parser, parseFieldMatcher, token, currentFieldName, otherOptions)) {
+            } else if (parseSpecial(aggregationName, context, token, currentFieldName, otherOptions)) {
                 return true;
             }
-        } else if (parseSpecial(aggregationName, parser, parseFieldMatcher, token, currentFieldName, otherOptions)) {
+        } else if (parseSpecial(aggregationName, context, token, currentFieldName, otherOptions)) {
            return true;
         }
         return false;
     }
 
-    public abstract boolean parseSpecial(String aggregationName, XContentParser parser, ParseFieldMatcher parseFieldMatcher,
-            XContentParser.Token token, String currentFieldName, Map<ParseField, Object> otherOptions) throws IOException;
+    public abstract boolean parseSpecial(String aggregationName, XContentParseContext context,
+            Token token, String currentFieldName, Map<ParseField, Object> otherOptions) throws IOException;
 
     protected abstract TermsAggregator.BucketCountThresholds getDefaultBucketCountThresholds();
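token(...) now owns all the shared terms options and defers anything unrecognized to the parseSpecial hook, handing subclasses the same context object. The contract, in a hypothetical minimal subclass (FANCY_FIELD is invented; the other abstract methods are elided):

    public class MyTermsParser extends AbstractTermsParser {
        private static final ParseField FANCY_FIELD = new ParseField("fancy");

        @Override
        public boolean parseSpecial(String aggregationName, XContentParseContext context,
                Token token, String currentFieldName, Map<ParseField, Object> otherOptions) throws IOException {
            if (token == Token.VALUE_BOOLEAN && context.matchField(currentFieldName, FANCY_FIELD)) {
                otherOptions.put(FANCY_FIELD, context.getParser().booleanValue());
                return true;   // consumed: token() stops searching
            }
            return false;      // not ours: token() ultimately reports the field as unknown
        }
        // getDefaultBucketCountThresholds() and friends omitted for brevity
    }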
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsParser.java
index 2a67dbe2218..bf8b06ab65a 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsParser.java
@@ -19,7 +19,6 @@
 package org.elasticsearch.search.aggregations.bucket.terms;
 
 import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.ParsingException;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentParser.Token;
@@ -27,6 +26,7 @@ import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
 import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order;
 import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator.BucketCountThresholds;
 import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
 import org.elasticsearch.search.aggregations.support.ValueType;
 import org.elasticsearch.search.aggregations.support.ValuesSourceType;
 
@@ -75,15 +75,16 @@ public class TermsParser extends AbstractTermsParser {
     }
 
     @Override
-    public boolean parseSpecial(String aggregationName, XContentParser parser, ParseFieldMatcher parseFieldMatcher, Token token,
-            String currentFieldName, Map<ParseField, Object> otherOptions) throws IOException {
+    public boolean parseSpecial(String aggregationName, XContentParseContext context, Token token,
+            String currentFieldName, Map<ParseField, Object> otherOptions) throws IOException {
+        XContentParser parser = context.getParser();
         if (token == XContentParser.Token.START_OBJECT) {
-            if (parseFieldMatcher.match(currentFieldName, TermsAggregationBuilder.ORDER_FIELD)) {
+            if (context.matchField(currentFieldName, TermsAggregationBuilder.ORDER_FIELD)) {
                 otherOptions.put(TermsAggregationBuilder.ORDER_FIELD,
                         Collections.singletonList(parseOrderParam(aggregationName, parser)));
                 return true;
             }
         } else if (token == XContentParser.Token.START_ARRAY) {
-            if (parseFieldMatcher.match(currentFieldName, TermsAggregationBuilder.ORDER_FIELD)) {
+            if (context.matchField(currentFieldName, TermsAggregationBuilder.ORDER_FIELD)) {
                 List<OrderElement> orderElements = new ArrayList<>();
                 while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                     if (token == XContentParser.Token.START_OBJECT) {
@@ -98,7 +99,7 @@ public class TermsParser extends AbstractTermsParser {
                 return true;
             }
         } else if (token == XContentParser.Token.VALUE_BOOLEAN) {
-            if (parseFieldMatcher.match(currentFieldName, TermsAggregationBuilder.SHOW_TERM_DOC_COUNT_ERROR)) {
+            if (context.matchField(currentFieldName, TermsAggregationBuilder.SHOW_TERM_DOC_COUNT_ERROR)) {
                 otherOptions.put(TermsAggregationBuilder.SHOW_TERM_DOC_COUNT_ERROR, parser.booleanValue());
                 return true;
             }
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgParser.java
index b4f9261b1eb..bc6f762295c 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgParser.java
@@ -19,9 +19,9 @@
 package org.elasticsearch.search.aggregations.metrics.avg;
 
 import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.NumericValuesSourceParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
 import org.elasticsearch.search.aggregations.support.ValueType;
 import org.elasticsearch.search.aggregations.support.ValuesSourceType;
 
@@ -38,8 +38,8 @@ public class AvgParser extends NumericValuesSourceParser {
     }
 
     @Override
-    protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token, XContentParser parser,
-            ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
+    protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token,
+            XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
         return false;
     }
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityParser.java
index 3a2e6a2072a..e40e0767994 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityParser.java
@@ -20,10 +20,9 @@
 package org.elasticsearch.search.aggregations.metrics.cardinality;
 
 import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
-import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentParser.Token;
 import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.AnyValuesSourceParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
 import org.elasticsearch.search.aggregations.support.ValueType;
 import org.elasticsearch.search.aggregations.support.ValuesSourceType;
 
@@ -51,13 +50,13 @@ public class CardinalityParser extends AnyValuesSourceParser {
     }
 
     @Override
-    protected boolean token(String aggregationName, String currentFieldName, Token token, XContentParser parser,
-            ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
+    protected boolean token(String aggregationName, String currentFieldName, Token token,
+            XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
         if (token.isValue()) {
-            if (parseFieldMatcher.match(currentFieldName, CardinalityAggregationBuilder.PRECISION_THRESHOLD_FIELD)) {
-                otherOptions.put(CardinalityAggregationBuilder.PRECISION_THRESHOLD_FIELD, parser.longValue());
+            if (context.matchField(currentFieldName, CardinalityAggregationBuilder.PRECISION_THRESHOLD_FIELD)) {
+                otherOptions.put(CardinalityAggregationBuilder.PRECISION_THRESHOLD_FIELD, context.getParser().longValue());
                 return true;
-            } else if (parseFieldMatcher.match(currentFieldName, REHASH)) {
+            } else if (context.matchField(currentFieldName, REHASH)) {
                 // ignore
                 return true;
             }
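The rehash branch above is a small backward-compatibility idiom worth naming: recognize the retired option, record nothing, and report it handled so old requests keep parsing. In isolation (RETIRED_FIELD stands for any such option):

    // Tolerate-and-ignore: the option is accepted but deliberately dropped,
    // so nothing is recorded in otherOptions and no error is raised.
    if (context.matchField(currentFieldName, RETIRED_FIELD)) {
        return true;
    }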
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsParser.java
index 7420fc0149e..c42de23949b 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsParser.java
@@ -20,10 +20,10 @@
 package org.elasticsearch.search.aggregations.metrics.geobounds;
 
 import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentParser.Token;
 import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.GeoPointValuesSourceParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
 import org.elasticsearch.search.aggregations.support.ValueType;
 import org.elasticsearch.search.aggregations.support.ValuesSourceType;
 
@@ -48,11 +48,11 @@ public class GeoBoundsParser extends GeoPointValuesSourceParser {
     }
 
     @Override
-    protected boolean token(String aggregationName, String currentFieldName, Token token, XContentParser parser,
-            ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
+    protected boolean token(String aggregationName, String currentFieldName, Token token,
+            XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
         if (token == XContentParser.Token.VALUE_BOOLEAN) {
-            if (parseFieldMatcher.match(currentFieldName, GeoBoundsAggregator.WRAP_LONGITUDE_FIELD)) {
-                otherOptions.put(GeoBoundsAggregator.WRAP_LONGITUDE_FIELD, parser.booleanValue());
+            if (context.matchField(currentFieldName, GeoBoundsAggregator.WRAP_LONGITUDE_FIELD)) {
+                otherOptions.put(GeoBoundsAggregator.WRAP_LONGITUDE_FIELD, context.getParser().booleanValue());
                 return true;
             }
         }
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidParser.java
index 6c9e9ba67b0..8e88a11c6b6 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidParser.java
@@ -20,10 +20,9 @@
 package org.elasticsearch.search.aggregations.metrics.geocentroid;
 
 import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
-import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentParser.Token;
 import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.GeoPointValuesSourceParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
 import org.elasticsearch.search.aggregations.support.ValueType;
 import org.elasticsearch.search.aggregations.support.ValuesSourceType;
 
@@ -40,8 +39,8 @@ public class GeoCentroidParser extends GeoPointValuesSourceParser {
     }
 
     @Override
-    protected boolean token(String aggregationName, String currentFieldName, Token token, XContentParser parser,
-            ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
+    protected boolean token(String aggregationName, String currentFieldName, Token token,
+            XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
         return false;
     }
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxParser.java
index d2ddd4daa08..f0290e93fa9 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxParser.java
@@ -19,9 +19,9 @@
 package org.elasticsearch.search.aggregations.metrics.max;
 
 import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.NumericValuesSourceParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
 import org.elasticsearch.search.aggregations.support.ValueType;
 import org.elasticsearch.search.aggregations.support.ValuesSourceType;
 
@@ -38,8 +38,8 @@ public class MaxParser extends NumericValuesSourceParser {
     }
 
     @Override
-    protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token, XContentParser parser,
-            ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
+    protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token,
+            XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
         return false;
     }
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinParser.java
index 194c08fc49b..4381ca41899 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinParser.java
@@ -19,10 +19,9 @@
 package org.elasticsearch.search.aggregations.metrics.min;
 
 import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
-import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentParser.Token;
 import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.NumericValuesSourceParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
 import org.elasticsearch.search.aggregations.support.ValueType;
 import org.elasticsearch.search.aggregations.support.ValuesSourceType;
 
@@ -39,8 +38,8 @@ public class MinParser extends NumericValuesSourceParser {
     }
 
     @Override
-    protected boolean token(String aggregationName, String currentFieldName, Token token, XContentParser parser,
-            ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
+    protected boolean token(String aggregationName, String currentFieldName, Token token,
+            XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
         return false;
     }
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractPercentilesParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractPercentilesParser.java
index ec145754a04..053a415c971 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractPercentilesParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractPercentilesParser.java
@@ -21,10 +21,10 @@ package org.elasticsearch.search.aggregations.metrics.percentiles;
 
 import com.carrotsearch.hppc.DoubleArrayList;
 import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentParser.Token;
 import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.NumericValuesSourceParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
 import org.elasticsearch.search.aggregations.support.ValueType;
 import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric;
 import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
 
@@ -45,10 +45,11 @@ public abstract class AbstractPercentilesParser extends NumericValuesSourceParse
     }
 
     @Override
-    protected boolean token(String aggregationName, String currentFieldName, Token token, XContentParser parser,
-            ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
+    protected boolean token(String aggregationName, String currentFieldName, Token token,
+            XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
+        XContentParser parser = context.getParser();
         if (token == XContentParser.Token.START_ARRAY) {
-            if (parseFieldMatcher.match(currentFieldName, keysField())) {
+            if (context.matchField(currentFieldName, keysField())) {
                 DoubleArrayList values = new DoubleArrayList(10);
                 while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                     double value = parser.doubleValue();
@@ -61,7 +62,7 @@ public abstract class AbstractPercentilesParse
                 return false;
             }
         } else if (token == XContentParser.Token.VALUE_BOOLEAN) {
-            if (parseFieldMatcher.match(currentFieldName, KEYED_FIELD)) {
+            if (context.matchField(currentFieldName, KEYED_FIELD)) {
                 boolean keyed = parser.booleanValue();
                 otherOptions.put(KEYED_FIELD, keyed);
                 return true;
@@ -80,7 +81,7 @@ public abstract class AbstractPercentilesParse
                     if (token == XContentParser.Token.FIELD_NAME) {
                         currentFieldName = parser.currentName();
                     } else if (token == XContentParser.Token.VALUE_NUMBER) {
-                        if (parseFieldMatcher.match(currentFieldName, COMPRESSION_FIELD)) {
+                        if (context.matchField(currentFieldName, COMPRESSION_FIELD)) {
                             double compression = parser.doubleValue();
                             otherOptions.put(COMPRESSION_FIELD, compression);
                         } else {
@@ -96,7 +97,7 @@ public abstract class AbstractPercentilesParse
                     if (token == XContentParser.Token.FIELD_NAME) {
                         currentFieldName = parser.currentName();
                     } else if (token == XContentParser.Token.VALUE_NUMBER) {
-                        if (parseFieldMatcher.match(currentFieldName, NUMBER_SIGNIFICANT_DIGITS_FIELD)) {
+                        if (context.matchField(currentFieldName, NUMBER_SIGNIFICANT_DIGITS_FIELD)) {
                             int numberOfSignificantValueDigits = parser.intValue();
                             otherOptions.put(NUMBER_SIGNIFICANT_DIGITS_FIELD, numberOfSignificantValueDigits);
                         } else {
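The keys branch collects the percents/values array into an hppc primitive list before converting it once at the end, avoiding per-element boxing. The shape of that collection step, in isolation (values illustrative):

    // Reads e.g. "percents": [1, 5, 25, 50, 75, 95, 99] without boxing.
    DoubleArrayList values = new DoubleArrayList(10);
    while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
        values.add(parser.doubleValue());
    }
    double[] keys = values.toArray();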
context.getDefaultScriptLanguage()); } else if (token == XContentParser.Token.START_OBJECT && context.getParseFieldMatcher().match(currentFieldName, PARAMS_FIELD)) { params = parser.map(); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsParser.java index eacfc0068b4..60e3d2ef0aa 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsParser.java @@ -19,9 +19,9 @@ package org.elasticsearch.search.aggregations.metrics.stats; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.NumericValuesSourceParser; +import org.elasticsearch.search.aggregations.support.XContentParseContext; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSourceType; @@ -38,8 +38,8 @@ public class StatsParser extends NumericValuesSourceParser { } @Override - protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token, XContentParser parser, - ParseFieldMatcher parseFieldMatcher, Map otherOptions) throws IOException { + protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token, + XContentParseContext context, Map otherOptions) throws IOException { return false; } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsParser.java index c650847360f..9644d26e93a 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsParser.java @@ -19,9 +19,9 @@ package org.elasticsearch.search.aggregations.metrics.stats.extended; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.NumericValuesSourceParser; +import org.elasticsearch.search.aggregations.support.XContentParseContext; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSourceType; @@ -38,11 +38,11 @@ public class ExtendedStatsParser extends NumericValuesSourceParser { } @Override - protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token, XContentParser parser, - ParseFieldMatcher parseFieldMatcher, Map otherOptions) throws IOException { - if (parseFieldMatcher.match(currentFieldName, ExtendedStatsAggregator.SIGMA_FIELD)) { + protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token, + XContentParseContext context, Map otherOptions) throws IOException { + if (context.matchField(currentFieldName, ExtendedStatsAggregator.SIGMA_FIELD)) { if (token.isValue()) { - otherOptions.put(ExtendedStatsAggregator.SIGMA_FIELD, parser.doubleValue()); + otherOptions.put(ExtendedStatsAggregator.SIGMA_FIELD, context.getParser().doubleValue()); return true; } } diff 
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumParser.java
index 6edc6cc8905..ee82829b0a7 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumParser.java
@@ -19,9 +19,9 @@
 package org.elasticsearch.search.aggregations.metrics.sum;
 
 import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.NumericValuesSourceParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
 import org.elasticsearch.search.aggregations.support.ValueType;
 import org.elasticsearch.search.aggregations.support.ValuesSourceType;
 
@@ -38,8 +38,8 @@ public class SumParser extends NumericValuesSourceParser {
     }
 
     @Override
-    protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token, XContentParser parser,
-            ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
+    protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token,
+                            XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
         return false;
     }
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java
index 0c21f78aa06..828d5679846 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java
@@ -622,7 +622,7 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder
-    protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token, XContentParser parser,
-            ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
+    protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token,
+                            XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
         return false;
     }
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregationBuilder.java
index cee17076e5d..cd7b1bb828e 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregationBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregationBuilder.java
@@ -179,7 +179,7 @@ public class BucketScriptPipelineAggregationBuilder extends AbstractPipelineAggr
             } else if (context.getParseFieldMatcher().match(currentFieldName, GAP_POLICY)) {
                 gapPolicy = GapPolicy.parse(context, parser.text(), parser.getTokenLocation());
             } else if (context.getParseFieldMatcher().match(currentFieldName, ScriptField.SCRIPT)) {
-                script = Script.parse(parser, context.getParseFieldMatcher());
+                script = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage());
             } else {
                 throw new ParsingException(parser.getTokenLocation(),
                         "Unknown key for a " + token + " in [" + reducerName + "]: [" + currentFieldName + "].");
@@ -201,7 +201,7 @@ public class BucketScriptPipelineAggregationBuilder extends AbstractPipelineAggr
                 }
             } else if (token == XContentParser.Token.START_OBJECT) {
                 if (context.getParseFieldMatcher().match(currentFieldName, ScriptField.SCRIPT)) {
-                    script = Script.parse(parser, context.getParseFieldMatcher());
+                    script = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage());
                 } else if (context.getParseFieldMatcher().match(currentFieldName, BUCKETS_PATH)) {
                     Map<String, Object> map = parser.map();
                     bucketsPathsMap = new HashMap<>();
@@ -260,4 +260,4 @@ public class BucketScriptPipelineAggregationBuilder extends AbstractPipelineAggr
     public String getWriteableName() {
         return NAME;
     }
-}
\ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketselector/BucketSelectorPipelineAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketselector/BucketSelectorPipelineAggregationBuilder.java
index 97cf02d69a1..e3b42376728 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketselector/BucketSelectorPipelineAggregationBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketselector/BucketSelectorPipelineAggregationBuilder.java
@@ -142,7 +142,7 @@ public class BucketSelectorPipelineAggregationBuilder extends AbstractPipelineAg
             } else if (context.getParseFieldMatcher().match(currentFieldName, GAP_POLICY)) {
                 gapPolicy = GapPolicy.parse(context, parser.text(), parser.getTokenLocation());
             } else if (context.getParseFieldMatcher().match(currentFieldName, ScriptField.SCRIPT)) {
-                script = Script.parse(parser, context.getParseFieldMatcher());
+                script = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage());
             } else {
                 throw new ParsingException(parser.getTokenLocation(),
                         "Unknown key for a " + token + " in [" + reducerName + "]: [" + currentFieldName + "].");
@@ -164,7 +164,7 @@ public class BucketSelectorPipelineAggregationBuilder extends AbstractPipelineAg
             }
         } else if (token == XContentParser.Token.START_OBJECT) {
             if (context.getParseFieldMatcher().match(currentFieldName, ScriptField.SCRIPT)) {
-                script = Script.parse(parser, context.getParseFieldMatcher());
+                script = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage());
             } else if (context.getParseFieldMatcher().match(currentFieldName, BUCKETS_PATH)) {
                 Map<String, Object> map = parser.map();
                 bucketsPathsMap = new HashMap<>();
@@ -219,4 +219,4 @@ public class BucketSelectorPipelineAggregationBuilder extends AbstractPipelineAg
     public String getWriteableName() {
         return NAME;
     }
-}
\ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/support/AbstractValuesSourceParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/support/AbstractValuesSourceParser.java
index 51d2ea2e8c9..57eea9ccf65 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/support/AbstractValuesSourceParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/support/AbstractValuesSourceParser.java
@@ -20,7 +20,6 @@
 package org.elasticsearch.search.aggregations.support;
 
 import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.ParsingException;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.query.QueryParseContext;
@@ -95,6 +94,8 @@ public abstract class AbstractValuesSourceParser
         Object missing = null;
         DateTimeZone timezone = null;
         Map<ParseField, Object> otherOptions = new HashMap<>();
+        XContentParseContext parserContext =
+                new XContentParseContext(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage());
 
         XContentParser.Token token;
         String currentFieldName = null;
@@ -126,22 +127,22 @@ public abstract class AbstractValuesSourceParser
                             + valueType + "]. It can only work on value of type ["
                             + targetValueType + "]");
                 }
-            } else if (!token(aggregationName, currentFieldName, token, parser, context.getParseFieldMatcher(), otherOptions)) {
+            } else if (!token(aggregationName, currentFieldName, token, parserContext, otherOptions)) {
                 throw new ParsingException(parser.getTokenLocation(),
                         "Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "].");
             }
-        } else if (!token(aggregationName, currentFieldName, token, parser, context.getParseFieldMatcher(), otherOptions)) {
+        } else if (!token(aggregationName, currentFieldName, token, parserContext, otherOptions)) {
             throw new ParsingException(parser.getTokenLocation(),
                     "Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "].");
         }
     } else if (scriptable && token == XContentParser.Token.START_OBJECT) {
         if (context.getParseFieldMatcher().match(currentFieldName, ScriptField.SCRIPT)) {
-            script = Script.parse(parser, context.getParseFieldMatcher());
-        } else if (!token(aggregationName, currentFieldName, token, parser, context.getParseFieldMatcher(), otherOptions)) {
+            script = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage());
+        } else if (!token(aggregationName, currentFieldName, token, parserContext, otherOptions)) {
             throw new ParsingException(parser.getTokenLocation(),
                     "Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "].");
         }
-    } else if (!token(aggregationName, currentFieldName, token, parser, context.getParseFieldMatcher(), otherOptions)) {
+    } else if (!token(aggregationName, currentFieldName, token, parserContext, otherOptions)) {
         throw new ParsingException(parser.getTokenLocation(),
                 "Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "].");
     }
@@ -184,8 +185,7 @@ public abstract class AbstractValuesSourceParser
      *            the target type of the final value output by the aggregation
      * @param otherOptions
      *            a {@link Map} containing the extra options parsed by the
-     *            {@link #token(String, String, org.elasticsearch.common.xcontent.XContentParser.Token,
-     *            XContentParser, ParseFieldMatcher, Map)}
+     *            {@link #token(String, String, XContentParser.Token, XContentParseContext, Map)}
      *            method
      * @return the created factory
      */
@@ -203,10 +203,8 @@ public abstract class AbstractValuesSourceParser
      *            the name of the current field being parsed
      * @param token
      *            the current token for the parser
-     * @param parser
-     *            the parser
-     * @param parseFieldMatcher
-     *            the {@link ParseFieldMatcher} to use to match field names
+     * @param context
+     *            the query context
      * @param otherOptions
      *            a {@link Map} of options to be populated by successive calls
      *            to this method which will then be passed to the
@@ -217,6 +215,6 @@ public abstract class AbstractValuesSourceParser
      * @throws IOException
      *             if an error occurs whilst parsing
      */
-    protected abstract boolean token(String aggregationName, String currentFieldName, XContentParser.Token token, XContentParser parser,
-            ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException;
+    protected abstract boolean token(String aggregationName, String currentFieldName, XContentParser.Token token,
+                                     XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException;
 }
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/support/XContentParseContext.java b/core/src/main/java/org/elasticsearch/search/aggregations/support/XContentParseContext.java
new file mode 100644
index 00000000000..07c33f1f473
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/support/XContentParseContext.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.support;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.ParseFieldMatcher;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+/**
+ * A minimal context for parsing xcontent into aggregation builders.
+ * Only a minimal set of dependencies and settings are available.
+ */
+public final class XContentParseContext {
+
+    private final XContentParser parser;
+
+    private final ParseFieldMatcher parseFieldMatcher;
+
+    private final String defaultScriptLanguage;
+
+    public XContentParseContext(XContentParser parser, ParseFieldMatcher parseFieldMatcher, String defaultScriptLanguage) {
+        this.parser = parser;
+        this.parseFieldMatcher = parseFieldMatcher;
+        this.defaultScriptLanguage = defaultScriptLanguage;
+    }
+
+    public XContentParser getParser() {
+        return parser;
+    }
+
+    public ParseFieldMatcher getParseFieldMatcher() {
+        return parseFieldMatcher;
+    }
+
+    public String getDefaultScriptLanguage() {
+        return defaultScriptLanguage;
+    }
+
+    /**
+     * Returns whether the parse field we're looking for matches with the found field name.
+     *
+     * Helper that delegates to {@link ParseFieldMatcher#match(String, ParseField)}.
+     */
+    public boolean matchField(String fieldName, ParseField parseField) {
+        return parseFieldMatcher.match(fieldName, parseField);
+    }
+
+}
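The new XContentParseContext is deliberately small: it bundles the parser, the ParseFieldMatcher, and the default script language into the single argument that replaces the old (parser, parseFieldMatcher) pair in token(), which is what the signature changes above are threading through. A minimal sketch of how consuming code uses it — the helper class and the "samples" field are hypothetical, invented purely for illustration; only matchField() and getParser() come from the class added above:

import java.io.IOException;
import java.util.Map;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.search.aggregations.support.XContentParseContext;

// Hypothetical helper, not part of this change: consumes one value token the
// way the migrated token() implementations do.
final class XContentParseContextDemo {
    private static final ParseField SAMPLES_FIELD = new ParseField("samples"); // hypothetical field

    static boolean consumeSamples(XContentParseContext context, String currentFieldName,
                                  Map<ParseField, Object> otherOptions) throws IOException {
        // matchField() is shorthand for context.getParseFieldMatcher().match(...)
        if (context.matchField(currentFieldName, SAMPLES_FIELD)) {
            // the raw parser stays reachable when a value must actually be read
            otherOptions.put(SAMPLES_FIELD, context.getParser().intValue());
            return true;  // token consumed
        }
        return false;     // not ours; the caller raises a ParsingException
    }
}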
diff --git a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java
index 7cfc998836d..309e49448e9 100644
--- a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java
@@ -1273,7 +1273,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
                     currentFieldName = parser.currentName();
                 } else if (token.isValue()) {
                     if (context.getParseFieldMatcher().match(currentFieldName, SCRIPT_FIELD)) {
-                        script = Script.parse(parser, context.getParseFieldMatcher());
+                        script = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage());
                     } else if (context.getParseFieldMatcher().match(currentFieldName, IGNORE_FAILURE_FIELD)) {
                         ignoreFailure = parser.booleanValue();
                     } else {
@@ -1282,7 +1282,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
                     }
                 } else if (token == XContentParser.Token.START_OBJECT) {
                     if (context.getParseFieldMatcher().match(currentFieldName, SCRIPT_FIELD)) {
-                        script = Script.parse(parser, context.getParseFieldMatcher());
+                        script = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage());
                     } else {
                         throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].",
                                 parser.getTokenLocation());
diff --git a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java
index 8c0436361c2..0a7cb5e1b36 100644
--- a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java
@@ -244,7 +244,7 @@ public class ScriptSortBuilder extends SortBuilder<ScriptSortBuilder> {
                 currentName = parser.currentName();
             } else if (token == XContentParser.Token.START_OBJECT) {
                 if (parseField.match(currentName, ScriptField.SCRIPT)) {
-                    script = Script.parse(parser, parseField);
+                    script = Script.parse(parser, parseField, context.getDefaultScriptLanguage());
                 } else if (parseField.match(currentName, NESTED_FILTER_FIELD)) {
                     nestedFilter = context.parseInnerQueryBuilder();
                 } else {
@@ -260,7 +260,7 @@ public class ScriptSortBuilder extends SortBuilder<ScriptSortBuilder> {
                 } else if (parseField.match(currentName, NESTED_PATH_FIELD)) {
                     nestedPath = parser.text();
                 } else if (parseField.match(currentName, ScriptField.SCRIPT)) {
-                    script = Script.parse(parser, parseField);
+                    script = Script.parse(parser, parseField, context.getDefaultScriptLanguage());
                 } else {
                     throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] failed to parse field [" + currentName + "]");
                 }
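All the Script.parse hunks above share one shape: every call site that used the two-argument Script.parse(parser, matcher) now passes the context's default script language as a third argument, so an inline script with no explicit "lang" key inherits the per-request default rather than a hard-coded one. A sketch of the call-site pattern under that assumption — "painless" here only stands in for whatever getDefaultScriptLanguage() actually returns at runtime:

import java.io.IOException;

import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.script.Script;

// Sketch only: in the real parsers the default comes from
// context.getDefaultScriptLanguage(), never a literal.
final class ScriptParseDemo {
    static Script parse(XContentParser parser, ParseFieldMatcher matcher) throws IOException {
        String defaultScriptLanguage = "painless"; // assumption for illustration
        // Three-argument overload: fills in the language only when the
        // parsed script did not declare one itself.
        return Script.parse(parser, matcher, defaultScriptLanguage);
    }
}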
diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion2x/Completion090PostingsFormat.java b/core/src/main/java/org/elasticsearch/search/suggest/completion2x/Completion090PostingsFormat.java
index d3008e999e8..245f2416b40 100644
--- a/core/src/main/java/org/elasticsearch/search/suggest/completion2x/Completion090PostingsFormat.java
+++ b/core/src/main/java/org/elasticsearch/search/suggest/completion2x/Completion090PostingsFormat.java
@@ -18,6 +18,7 @@
  */
 package org.elasticsearch.search.suggest.completion2x;
 
+import org.apache.logging.log4j.Logger;
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.codecs.FieldsConsumer;
 import org.apache.lucene.codecs.FieldsProducer;
@@ -42,7 +43,6 @@ import org.apache.lucene.util.Accountables;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.Version;
-import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.index.mapper.CompletionFieldMapper2x;
 import org.elasticsearch.index.mapper.MappedFieldType;
@@ -75,7 +75,7 @@ public class Completion090PostingsFormat extends PostingsFormat {
     public static final int SUGGEST_VERSION_CURRENT = SUGGEST_CODEC_VERSION;
     public static final String EXTENSION = "cmp";
 
-    private static final ESLogger logger = Loggers.getLogger(Completion090PostingsFormat.class);
+    private static final Logger logger = Loggers.getLogger(Completion090PostingsFormat.class);
     private PostingsFormat delegatePostingsFormat;
     private static final Map<String, CompletionLookupProvider> providers;
     private CompletionLookupProvider writeProvider;
diff --git a/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java
index 2ccf24a8dc1..180f8f0af11 100644
--- a/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java
+++ b/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java
@@ -22,6 +22,8 @@ import com.carrotsearch.hppc.IntHashSet;
 import com.carrotsearch.hppc.IntSet;
 import com.carrotsearch.hppc.cursors.ObjectCursor;
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.IndicesOptions;
@@ -63,7 +65,6 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.repositories.IndexId;
 import org.elasticsearch.repositories.RepositoriesService;
 import org.elasticsearch.repositories.Repository;
 import org.elasticsearch.repositories.RepositoryData;
@@ -461,7 +462,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
 
             @Override
             public void onFailure(String source, Exception e) {
-                logger.warn("[{}] failed to restore snapshot", e, snapshotId);
+                logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to restore snapshot", snapshotId), e);
                 listener.onFailure(e);
             }
 
@@ -478,7 +479,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
             }
 
         } catch (Exception e) {
-            logger.warn("[{}] failed to restore snapshot", e, request.repositoryName + ":" + request.snapshotName);
+            logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to restore snapshot", request.repositoryName + ":" + request.snapshotName), e);
             listener.onFailure(e);
         }
     }
@@ -602,7 +603,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
         @Override
         public void onFailure(String source, @Nullable Exception e) {
             for (UpdateIndexShardRestoreStatusRequest request : drainedRequests) {
-                logger.warn("[{}][{}] failed to update snapshot status to [{}]", e, request.snapshot(), request.shardId(), request.status());
+                logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}][{}] failed to update snapshot status to [{}]", request.snapshot(), request.shardId(), request.status()), e);
             }
         }
@@ -670,7 +671,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
         try {
             listener.onResponse(new RestoreCompletionResponse(snapshot, restoreInfo));
         } catch (Exception e) {
-            logger.warn("failed to update snapshot status for [{}]", e, listener);
+            logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to update snapshot status for [{}]", listener), e);
         }
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java
index e957d2deb6c..1f7a4ee4fd6 100644
--- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java
+++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java
@@ -20,6 +20,8 @@
 package org.elasticsearch.snapshots;
 
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.index.IndexCommit;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.cluster.ClusterChangedEvent;
@@ -312,7 +314,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
 
                         @Override
                         public void onFailure(Exception e) {
-                            logger.warn("[{}] [{}] failed to create snapshot", e, shardId, entry.getKey());
+                            logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] [{}] failed to create snapshot", shardId, entry.getKey()), e);
                             updateIndexShardSnapshotStatus(entry.getKey(), shardId,
                                     new SnapshotsInProgress.ShardSnapshotStatus(localNodeId, SnapshotsInProgress.State.FAILED, ExceptionsHelper.detailedMessage(e)));
                         }
@@ -494,7 +496,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
                         UPDATE_SNAPSHOT_ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME);
             }
         } catch (Exception e) {
-            logger.warn("[{}] [{}] failed to update snapshot state", e, request.snapshot(), request.status());
+            logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] [{}] failed to update snapshot state", request.snapshot(), request.status()), e);
         }
     }
 
@@ -578,7 +580,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
             @Override
             public void onFailure(String source, Exception e) {
                 for (UpdateIndexShardSnapshotStatusRequest request : drainedRequests) {
-                    logger.warn("[{}][{}] failed to update snapshot status to [{}]", e, request.snapshot(), request.shardId(), request.status());
+                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}][{}] failed to update snapshot status to [{}]", request.snapshot(), request.shardId(), request.status()), e);
                 }
             }
         });
diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java
index a0c1ddf1ea7..ea8deea5661 100644
--- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java
+++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java
@@ -21,6 +21,8 @@ package org.elasticsearch.snapshots;
 
 import com.carrotsearch.hppc.cursors.ObjectCursor;
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.util.CollectionUtil;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.ActionListener;
@@ -180,7 +182,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
                 snapshotSet.add(repository.getSnapshotInfo(snapshotId));
             } catch (Exception ex) {
                 if (ignoreUnavailable) {
-                    logger.warn("failed to get snapshot [{}]", ex, snapshotId);
+                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to get snapshot [{}]", snapshotId), ex);
                 } else {
                     throw new SnapshotException(repositoryName, snapshotId, "Snapshot could not be read", ex);
                 }
@@ -254,7 +256,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
 
             @Override
             public void onFailure(String source, Exception e) {
-                logger.warn("[{}][{}] failed to create snapshot", e, repositoryName, snapshotName);
+                logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}][{}] failed to create snapshot", repositoryName, snapshotName), e);
                 newSnapshot = null;
                 listener.onFailure(e);
             }
@@ -405,7 +407,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
 
             @Override
             public void onFailure(String source, Exception e) {
-                logger.warn("[{}] failed to create snapshot", e, snapshot.snapshot().getSnapshotId());
+                logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to create snapshot", snapshot.snapshot().getSnapshotId()), e);
                 removeSnapshotFromClusterState(snapshot.snapshot(), null, e, new CleanupAfterErrorListener(snapshot, true, userCreateSnapshotListener, e));
             }
@@ -427,7 +429,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
                 }
             });
         } catch (Exception e) {
-            logger.warn("failed to create snapshot [{}]", e, snapshot.snapshot().getSnapshotId());
+            logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to create snapshot [{}]", snapshot.snapshot().getSnapshotId()), e);
             removeSnapshotFromClusterState(snapshot.snapshot(), null, e, new CleanupAfterErrorListener(snapshot, snapshotCreated, userCreateSnapshotListener, e));
         }
     }
@@ -469,7 +471,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
                                                Collections.emptyList());
                 } catch (Exception inner) {
                     inner.addSuppressed(exception);
-                    logger.warn("[{}] failed to close snapshot in repository", inner, snapshot.snapshot());
+                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to close snapshot in repository", snapshot.snapshot()), inner);
                 }
             }
             userCreateSnapshotListener.onFailure(e);
@@ -722,7 +724,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
 
                 @Override
                 public void onFailure(String source, Exception e) {
-                    logger.warn("failed to update snapshot state after shards started from [{}] ", e, source);
+                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to update snapshot state after shards started from [{}] ", source), e);
                 }
             });
         }
@@ -876,7 +878,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
             SnapshotInfo snapshotInfo = repository.finalizeSnapshot(snapshot.getSnapshotId(), entry.indices(), entry.startTime(),
                 failure, entry.shards().size(), Collections.unmodifiableList(shardFailures));
             removeSnapshotFromClusterState(snapshot, snapshotInfo, null);
         } catch (Exception e) {
-            logger.warn("[{}] failed to finalize snapshot", e, snapshot);
+            logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to finalize snapshot", snapshot), e);
             removeSnapshotFromClusterState(snapshot, null, e);
         }
     }
@@ -925,7 +927,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
 
             @Override
             public void onFailure(String source, Exception e) {
-                logger.warn("[{}] failed to remove snapshot metadata", e, snapshot);
+                logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to remove snapshot metadata", snapshot), e);
                 if (listener != null) {
                     listener.onFailure(e);
                 }
@@ -941,7 +943,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
                     listener.onSnapshotFailure(snapshot, failure);
                 }
             } catch (Exception t) {
-                logger.warn("failed to notify listener [{}]", t, listener);
+                logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to notify listener [{}]", listener), t);
             }
         }
         if (listener != null) {
diff --git a/core/src/main/java/org/elasticsearch/tasks/LoggingTaskListener.java b/core/src/main/java/org/elasticsearch/tasks/LoggingTaskListener.java
index 002d0b6a468..c3eeaa6ee8d 100644
--- a/core/src/main/java/org/elasticsearch/tasks/LoggingTaskListener.java
+++ b/core/src/main/java/org/elasticsearch/tasks/LoggingTaskListener.java
@@ -19,7 +19,9 @@
 
 package org.elasticsearch.tasks;
 
-import org.elasticsearch.common.logging.ESLogger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.common.logging.Loggers;
 
 /**
@@ -27,7 +29,7 @@ import org.elasticsearch.common.logging.Loggers;
  * need a listener but aren't returning the result to the user.
  */
 public final class LoggingTaskListener<Response> implements TaskListener<Response> {
-    private static final ESLogger logger = Loggers.getLogger(LoggingTaskListener.class);
+    private static final Logger logger = Loggers.getLogger(LoggingTaskListener.class);
 
     /**
      * Get the instance of NoopActionListener cast appropriately.
@@ -49,6 +51,6 @@ public final class LoggingTaskListener<Response> implements TaskListener<Respons
-        logger.warn("{} failed with exception", e, task.getId());
+        logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} failed with exception", task.getId()), e);
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/tasks/TaskManager.java b/core/src/main/java/org/elasticsearch/tasks/TaskManager.java
index f0fea6aa2a1..003a51c3175 100644
--- a/core/src/main/java/org/elasticsearch/tasks/TaskManager.java
+++ b/core/src/main/java/org/elasticsearch/tasks/TaskManager.java
@@ -19,6 +19,8 @@
 
 package org.elasticsearch.tasks;
 
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ElasticsearchTimeoutException;
 import org.elasticsearch.ExceptionsHelper;
@@ -166,7 +168,8 @@ public class TaskManager extends AbstractComponent implements ClusterStateListen
         try {
             taskResult = task.result(localNode, error);
         } catch (IOException ex) {
-            logger.warn("couldn't store error {}", ex, ExceptionsHelper.detailedMessage(error));
+            logger.warn(
+                (Supplier<?>) () -> new ParameterizedMessage("couldn't store error {}", ExceptionsHelper.detailedMessage(error)), ex);
             listener.onFailure(ex);
             return;
         }
@@ -178,7 +181,8 @@ public class TaskManager extends AbstractComponent implements ClusterStateListen
 
             @Override
             public void onFailure(Exception e) {
-                logger.warn("couldn't store error {}", e, ExceptionsHelper.detailedMessage(error));
+                logger.warn(
+                    (Supplier<?>) () -> new ParameterizedMessage("couldn't store error {}", ExceptionsHelper.detailedMessage(error)), e);
                 listener.onFailure(e);
             }
         });
@@ -199,7 +203,7 @@ public class TaskManager extends AbstractComponent implements ClusterStateListen
         try {
             taskResult = task.result(localNode, response);
         } catch (IOException ex) {
-            logger.warn("couldn't store response {}", ex, response);
+            logger.warn((Supplier<?>) () -> new ParameterizedMessage("couldn't store response {}", response), ex);
             listener.onFailure(ex);
             return;
         }
@@ -212,7 +216,7 @@ public class TaskManager extends AbstractComponent implements ClusterStateListen
 
             @Override
             public void onFailure(Exception e) {
-                logger.warn("couldn't store response {}", e, response);
+                logger.warn((Supplier<?>) () -> new ParameterizedMessage("couldn't store response {}", response), e);
                 listener.onFailure(e);
             }
         });
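Every logging change in the files above and below is the same mechanical rewrite: the old ESLogger overloads accepted the Throwable between the format string and its arguments, while the log4j 2 Logger wants either (String, Object...) or (Supplier<?>, Throwable). A self-contained sketch of the pattern — DemoLogging is invented for illustration; the Logger comes from Loggers.getLogger exactly as in the hunks:

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.common.logging.Loggers;

final class DemoLogging {
    private static final Logger logger = Loggers.getLogger(DemoLogging.class);

    static void onFailure(String snapshotId, Exception e) {
        // before: logger.warn("[{}] failed to restore snapshot", e, snapshotId);
        // after: the format arguments move into a lazily built message and the
        // exception becomes the trailing Throwable; the ParameterizedMessage is
        // only constructed if WARN is actually enabled, deferring formatting cost.
        logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to restore snapshot", snapshotId), e);
    }
}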
             listener.onFailure(ex);
             return;
         }
diff --git a/core/src/main/java/org/elasticsearch/tasks/TaskResultsService.java b/core/src/main/java/org/elasticsearch/tasks/TaskResultsService.java
index 4b68e8af97a..fd515c5733a 100644
--- a/core/src/main/java/org/elasticsearch/tasks/TaskResultsService.java
+++ b/core/src/main/java/org/elasticsearch/tasks/TaskResultsService.java
@@ -18,6 +18,8 @@
  */
 package org.elasticsearch.tasks;
 
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ExceptionsHelper;
@@ -163,7 +165,9 @@ public class TaskResultsService extends AbstractComponent {
             Streams.copy(is, out);
             return out.toString(IOUtils.UTF_8);
         } catch (Exception e) {
-            logger.error("failed to create tasks results index template [{}]", e, TASK_RESULT_INDEX_MAPPING_FILE);
+            logger.error(
+                (Supplier<?>) () -> new ParameterizedMessage(
+                    "failed to create tasks results index template [{}]", TASK_RESULT_INDEX_MAPPING_FILE), e);
             throw new IllegalStateException("failed to create tasks results index template [" + TASK_RESULT_INDEX_MAPPING_FILE + "]", e);
         }
diff --git a/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java
index 044f19eaeea..d2ff4defc9e 100644
--- a/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java
+++ b/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java
@@ -19,13 +19,15 @@
 
 package org.elasticsearch.threadpool;
 
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.util.Counter;
 import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.SizeValue;
@@ -413,7 +415,7 @@ public class ThreadPool extends AbstractComponent implements Closeable {
             try {
                 runnable.run();
             } catch (Exception e) {
-                logger.warn("failed to run {}", e, runnable.toString());
+                logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to run {}", runnable.toString()), e);
                 throw e;
             }
         }
@@ -527,18 +529,14 @@ public class ThreadPool extends AbstractComponent implements Closeable {
         }
     }
 
-    public static class Info implements Streamable, ToXContent {
+    public static class Info implements Writeable, ToXContent {
 
-        private String name;
-        private ThreadPoolType type;
-        private int min;
-        private int max;
-        private TimeValue keepAlive;
-        private SizeValue queueSize;
-
-        Info() {
-
-        }
+        private final String name;
+        private final ThreadPoolType type;
+        private final int min;
+        private final int max;
+        private final TimeValue keepAlive;
+        private final SizeValue queueSize;
 
         public Info(String name, ThreadPoolType type) {
             this(name, type, -1);
@@ -557,6 +555,25 @@ public class ThreadPool extends AbstractComponent implements Closeable {
             this.queueSize = queueSize;
         }
 
+        public Info(StreamInput in) throws IOException {
+            name = in.readString();
+            type = ThreadPoolType.fromType(in.readString());
+            min = in.readInt();
+            max = in.readInt();
+            keepAlive = in.readOptionalWriteable(TimeValue::new);
+            queueSize = in.readOptionalWriteable(SizeValue::new);
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            out.writeString(name);
+            out.writeString(type.getType());
+            out.writeInt(min);
+            out.writeInt(max);
+            out.writeOptionalWriteable(keepAlive);
+            out.writeOptionalWriteable(queueSize);
+        }
+
         public String getName() {
             return this.name;
         }
@@ -583,46 +600,6 @@ public class ThreadPool extends AbstractComponent implements Closeable {
             return this.queueSize;
         }
 
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            name = in.readString();
-            type = ThreadPoolType.fromType(in.readString());
-            min = in.readInt();
-            max = in.readInt();
-            if (in.readBoolean()) {
-                keepAlive = new TimeValue(in);
-            }
-            if (in.readBoolean()) {
-                queueSize = SizeValue.readSizeValue(in);
-            }
-            in.readBoolean(); // here to conform with removed waitTime
-            in.readBoolean(); // here to conform with removed rejected setting
-            in.readBoolean(); // here to conform with queue type
-        }
-
-        @Override
-        public void writeTo(StreamOutput out) throws IOException {
-            out.writeString(name);
-            out.writeString(type.getType());
-            out.writeInt(min);
-            out.writeInt(max);
-            if (keepAlive == null) {
-                out.writeBoolean(false);
-            } else {
-                out.writeBoolean(true);
-                keepAlive.writeTo(out);
-            }
-            if (queueSize == null) {
-                out.writeBoolean(false);
-            } else {
-                out.writeBoolean(true);
-                queueSize.writeTo(out);
-            }
-            out.writeBoolean(false); // here to conform with removed waitTime
-            out.writeBoolean(false); // here to conform with removed rejected setting
-            out.writeBoolean(false); // here to conform with queue type
-        }
-
         @Override
         public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
             builder.startObject(name);
@@ -652,7 +629,6 @@ public class ThreadPool extends AbstractComponent implements Closeable {
             static final String KEEP_ALIVE = "keep_alive";
             static final String QUEUE_SIZE = "queue_size";
         }
-
     }
 
     /**
@@ -779,14 +755,14 @@ public class ThreadPool extends AbstractComponent implements Closeable {
 
             @Override
             public void onFailure(Exception e) {
-                threadPool.logger.warn("failed to run scheduled task [{}] on thread pool [{}]", e, runnable.toString(), executor);
+                threadPool.logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to run scheduled task [{}] on thread pool [{}]", runnable.toString(), executor), e);
             }
 
             @Override
             public void onRejection(Exception e) {
                 run = false;
                 if (threadPool.logger.isDebugEnabled()) {
-                    threadPool.logger.debug("scheduled task [{}] was rejected on thread pool [{}]", e, runnable, executor);
+                    threadPool.logger.debug((Supplier<?>) () -> new ParameterizedMessage("scheduled task [{}] was rejected on thread pool [{}]", runnable, executor), e);
                 }
             }
diff --git a/core/src/main/java/org/elasticsearch/threadpool/ThreadPoolInfo.java b/core/src/main/java/org/elasticsearch/threadpool/ThreadPoolInfo.java
index 729c6cb7364..70c0f2c9598 100644
--- a/core/src/main/java/org/elasticsearch/threadpool/ThreadPoolInfo.java
+++ b/core/src/main/java/org/elasticsearch/threadpool/ThreadPoolInfo.java
@@ -21,27 +21,30 @@ package org.elasticsearch.threadpool;
 
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 
 import java.io.IOException;
-import java.util.ArrayList;
+import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
 
-/**
- */
-public class ThreadPoolInfo implements Streamable, Iterable<ThreadPool.Info>, ToXContent {
-
-    private List<ThreadPool.Info> infos;
-
-    ThreadPoolInfo() {
-    }
+public class ThreadPoolInfo implements Writeable, Iterable<ThreadPool.Info>, ToXContent {
+    private final List<ThreadPool.Info> infos;
 
     public ThreadPoolInfo(List<ThreadPool.Info> infos) {
-        this.infos = infos;
+        this.infos = Collections.unmodifiableList(infos);
+    }
+
+    public ThreadPoolInfo(StreamInput in) throws IOException {
+        this.infos = Collections.unmodifiableList(in.readList(ThreadPool.Info::new));
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeList(infos);
     }
 
     @Override
@@ -49,31 +52,6 @@ public class ThreadPoolInfo implements Streamable, Iterable<ThreadPool.Info>, To
         return infos.iterator();
     }
 
-    public static ThreadPoolInfo readThreadPoolInfo(StreamInput in) throws IOException {
-        ThreadPoolInfo info = new ThreadPoolInfo();
-        info.readFrom(in);
-        return info;
-    }
-
-    @Override
-    public void readFrom(StreamInput in) throws IOException {
-        int size = in.readVInt();
-        infos = new ArrayList<>(size);
-        for (int i = 0; i < size; i++) {
-            ThreadPool.Info info = new ThreadPool.Info();
-            info.readFrom(in);
-            infos.add(info);
-        }
-    }
-
-    @Override
-    public void writeTo(StreamOutput out) throws IOException {
-        out.writeVInt(infos.size());
-        for (ThreadPool.Info info : infos) {
-            info.writeTo(out);
-        }
-    }
-
     static final class Fields {
         static final String THREAD_POOL = "thread_pool";
     }
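ThreadPool.Info and ThreadPoolInfo above (and TransportInfo below) all follow the same Streamable-to-Writeable migration: fields become final, the no-arg constructor plus mutable readFrom() pair is replaced by a StreamInput constructor, and hand-rolled boolean null markers give way to readOptionalWriteable/writeOptionalWriteable. A toy round-trip under that pattern — DemoInfo is invented for illustration; the stream API calls are the ones used in the hunks above:

import java.io.IOException;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.unit.TimeValue;

final class DemoInfo implements Writeable {
    private final String name;         // final now: assigned once in either constructor
    private final TimeValue keepAlive; // nullable, hence "optional" on the wire

    DemoInfo(String name, TimeValue keepAlive) {
        this.name = name;
        this.keepAlive = keepAlive;
    }

    // Replaces Streamable's mutable readFrom(StreamInput).
    DemoInfo(StreamInput in) throws IOException {
        name = in.readString();
        keepAlive = in.readOptionalWriteable(TimeValue::new); // reads presence flag, then value
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(name);
        out.writeOptionalWriteable(keepAlive); // writes presence flag, then value
    }
}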
diff --git a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java
index 505890f2ead..00409f42995 100644
--- a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java
+++ b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java
@@ -20,6 +20,8 @@ package org.elasticsearch.transport;
 
 import com.carrotsearch.hppc.IntHashSet;
 import com.carrotsearch.hppc.IntSet;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ExceptionsHelper;
@@ -258,10 +260,13 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
                     sendMessage(channel, pingHeader, successfulPings::inc, false);
                 } catch (Exception e) {
                     if (isOpen(channel)) {
-                        logger.debug("[{}] failed to send ping transport message", e, node);
+                        logger.debug(
+                            (Supplier<?>) () -> new ParameterizedMessage("[{}] failed to send ping transport message", node), e);
                         failedPings.inc();
                     } else {
-                        logger.trace("[{}] failed to send ping transport message (channel closed)", e, node);
+                        logger.trace(
+                            (Supplier<?>) () -> new ParameterizedMessage(
+                                "[{}] failed to send ping transport message (channel closed)", node), e);
                     }
                 }
             }
@@ -397,7 +402,9 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
                     try {
                         nodeChannels = connectToChannels(node);
                     } catch (Exception e) {
-                        logger.trace("failed to connect to [{}], cleaning dangling connections", e, node);
+                        logger.trace(
+                            (Supplier<?>) () -> new ParameterizedMessage(
+                                "failed to connect to [{}], cleaning dangling connections", node), e);
                         throw e;
                     }
                 }
@@ -772,7 +779,9 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
             try {
                 closeChannels(entry.getValue());
             } catch (Exception e) {
-                logger.debug("Error closing serverChannel for profile [{}]", e, entry.getKey());
+                logger.debug(
+                    (Supplier<?>) () -> new ParameterizedMessage(
+                        "Error closing serverChannel for profile [{}]", entry.getKey()), e);
             }
         }
@@ -802,21 +811,27 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
             return;
         }
         if (isCloseConnectionException(e)) {
-            logger.trace("close connection exception caught on transport layer [{}], disconnecting from relevant node", e,
-                channel);
+            logger.trace(
+                (Supplier<?>) () -> new ParameterizedMessage(
+                    "close connection exception caught on transport layer [{}], disconnecting from relevant node",
+                    channel),
+                e);
             // close the channel, which will cause a node to be disconnected if relevant
             disconnectFromNodeChannel(channel, e);
         } else if (isConnectException(e)) {
-            logger.trace("connect exception caught on transport layer [{}]", e, channel);
+            logger.trace((Supplier<?>) () -> new ParameterizedMessage("connect exception caught on transport layer [{}]", channel), e);
             // close the channel as safe measure, which will cause a node to be disconnected if relevant
             disconnectFromNodeChannel(channel, e);
         } else if (e instanceof BindException) {
-            logger.trace("bind exception caught on transport layer [{}]", e, channel);
+            logger.trace((Supplier<?>) () -> new ParameterizedMessage("bind exception caught on transport layer [{}]", channel), e);
             // close the channel as safe measure, which will cause a node to be disconnected if relevant
             disconnectFromNodeChannel(channel, e);
         } else if (e instanceof CancelledKeyException) {
-            logger.trace("cancelled key exception caught on transport layer [{}], disconnecting from relevant node", e,
-                channel);
+            logger.trace(
+                (Supplier<?>) () -> new ParameterizedMessage(
+                    "cancelled key exception caught on transport layer [{}], disconnecting from relevant node",
+                    channel),
+                e);
             // close the channel as safe measure, which will cause a node to be disconnected if relevant
             disconnectFromNodeChannel(channel, e);
         } else if (e instanceof TcpTransport.HttpOnTransportException) {
@@ -825,7 +840,8 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
                 sendMessage(channel, new BytesArray(e.getMessage().getBytes(StandardCharsets.UTF_8)), () -> {}, true);
             }
         } else {
-            logger.warn("exception caught on transport layer [{}], closing connection", e, channel);
+            logger.warn(
+                (Supplier<?>) () -> new ParameterizedMessage("exception caught on transport layer [{}], closing connection", channel), e);
             // close the channel, which will cause a node to be disconnected if relevant
             disconnectFromNodeChannel(channel, e);
         }
@@ -1260,7 +1276,7 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
                 try {
                     handler.handleException(rtx);
                 } catch (Exception e) {
-                    logger.error("failed to handle exception response [{}]", e, handler);
+                    logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to handle exception response [{}]", handler), e);
                 }
             });
         }
@@ -1297,7 +1313,9 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
                 transportChannel.sendResponse(e);
             } catch (IOException inner) {
                 inner.addSuppressed(e);
-                logger.warn("Failed to send error message back to client for action [{}]", inner, action);
+                logger.warn(
+                    (Supplier<?>) () -> new ParameterizedMessage(
+                        "Failed to send error message back to client for action [{}]", action), inner);
             }
         }
         return action;
@@ -1343,7 +1361,9 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
                 transportChannel.sendResponse(e);
             } catch (Exception inner) {
                 inner.addSuppressed(e);
-                logger.warn("Failed to send error message back to client for action [{}]", inner, reg.getAction());
+                logger.warn(
+                    (Supplier<?>) () -> new ParameterizedMessage(
+                        "Failed to send error message back to client for action [{}]", reg.getAction()), inner);
             }
         }
     }
diff --git a/core/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java b/core/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java
index 5f302964099..3d46c0853ec 100644
--- a/core/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java
+++ b/core/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java
@@ -19,7 +19,8 @@
 
 package org.elasticsearch.transport;
 
-import org.elasticsearch.common.logging.ESLogger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.elasticsearch.threadpool.ThreadPool;
 
 import java.io.IOException;
@@ -30,12 +31,12 @@ import java.util.function.Supplier;
  */
 public class TransportChannelResponseHandler<T extends TransportResponse> implements TransportResponseHandler<T> {
 
-    private final ESLogger logger;
+    private final Logger logger;
     private final TransportChannel channel;
     private final String extraInfoOnError;
     private final Supplier<T> responseSupplier;
 
-    public TransportChannelResponseHandler(ESLogger logger, TransportChannel channel, String extraInfoOnError,
+    public TransportChannelResponseHandler(Logger logger, TransportChannel channel, String extraInfoOnError,
                                            Supplier<T> responseSupplier) {
         this.logger = logger;
         this.channel = channel;
@@ -62,7 +63,12 @@ public class TransportChannelResponseHandler<T extends TransportResponse> implem
         try {
             channel.sendResponse(exp);
         } catch (IOException e) {
-            logger.debug("failed to send failure {}", e, extraInfoOnError == null ? "" : "(" + extraInfoOnError + ")");
+            logger.debug(
+                (org.apache.logging.log4j.util.Supplier<?>)
+                    () -> new ParameterizedMessage(
+                        "failed to send failure {}",
+                        extraInfoOnError == null ? "" : "(" + extraInfoOnError + ")"),
+                e);
         }
     }
diff --git a/core/src/main/java/org/elasticsearch/transport/TransportInfo.java b/core/src/main/java/org/elasticsearch/transport/TransportInfo.java
index 236c0d50a98..fbabf49b65d 100644
--- a/core/src/main/java/org/elasticsearch/transport/TransportInfo.java
+++ b/core/src/main/java/org/elasticsearch/transport/TransportInfo.java
@@ -22,7 +22,7 @@ package org.elasticsearch.transport;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.transport.BoundTransportAddress;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -31,56 +31,17 @@ import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
 
-/**
- *
- */
-public class TransportInfo implements Streamable, ToXContent {
+public class TransportInfo implements Writeable, ToXContent {
 
     private BoundTransportAddress address;
     private Map<String, BoundTransportAddress> profileAddresses;
 
-    TransportInfo() {
-    }
-
     public TransportInfo(BoundTransportAddress address, @Nullable Map<String, BoundTransportAddress> profileAddresses) {
         this.address = address;
         this.profileAddresses = profileAddresses;
     }
 
-    static final class Fields {
-        static final String TRANSPORT = "transport";
-        static final String BOUND_ADDRESS = "bound_address";
-        static final String PUBLISH_ADDRESS = "publish_address";
-        static final String PROFILES = "profiles";
-    }
-
-    @Override
-    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        builder.startObject(Fields.TRANSPORT);
-        builder.array(Fields.BOUND_ADDRESS, (Object[]) address.boundAddresses());
-        builder.field(Fields.PUBLISH_ADDRESS, address.publishAddress().toString());
-        builder.startObject(Fields.PROFILES);
-        if (profileAddresses != null && profileAddresses.size() > 0) {
-            for (Map.Entry<String, BoundTransportAddress> entry : profileAddresses.entrySet()) {
-                builder.startObject(entry.getKey());
-                builder.array(Fields.BOUND_ADDRESS, (Object[]) entry.getValue().boundAddresses());
-                builder.field(Fields.PUBLISH_ADDRESS, entry.getValue().publishAddress().toString());
-                builder.endObject();
-            }
-        }
-        builder.endObject();
-        builder.endObject();
-        return builder;
-    }
-
-    public static TransportInfo readTransportInfo(StreamInput in) throws IOException {
-        TransportInfo info = new TransportInfo();
-        info.readFrom(in);
-        return info;
-    }
-
-    @Override
-    public void readFrom(StreamInput in) throws IOException {
+    public TransportInfo(StreamInput in) throws IOException {
         address = BoundTransportAddress.readBoundTransportAddress(in);
         int size = in.readVInt();
         if (size > 0) {
@@ -109,6 +70,32 @@ public class TransportInfo implements Streamable, ToXContent {
         }
     }
 
+    static final class Fields {
+        static final String TRANSPORT = "transport";
+        static final String BOUND_ADDRESS = "bound_address";
+        static final String PUBLISH_ADDRESS = "publish_address";
+        static final String PROFILES = "profiles";
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject(Fields.TRANSPORT);
+        builder.array(Fields.BOUND_ADDRESS, (Object[]) address.boundAddresses());
+        builder.field(Fields.PUBLISH_ADDRESS, address.publishAddress().toString());
+        builder.startObject(Fields.PROFILES);
+        if (profileAddresses != null && profileAddresses.size() > 0) {
+            for (Map.Entry<String, BoundTransportAddress> entry : profileAddresses.entrySet()) {
+                builder.startObject(entry.getKey());
+                builder.array(Fields.BOUND_ADDRESS, (Object[]) entry.getValue().boundAddresses());
+                builder.field(Fields.PUBLISH_ADDRESS, entry.getValue().publishAddress().toString());
+                builder.endObject();
+            }
+        }
+        builder.endObject();
+        builder.endObject();
+        return builder;
+    }
+
     public BoundTransportAddress address() {
         return address;
     }
profileAddresses.entrySet()) { + builder.startObject(entry.getKey()); + builder.array(Fields.BOUND_ADDRESS, (Object[]) entry.getValue().boundAddresses()); + builder.field(Fields.PUBLISH_ADDRESS, entry.getValue().publishAddress().toString()); + builder.endObject(); + } + } + builder.endObject(); + builder.endObject(); + return builder; + } + public BoundTransportAddress address() { return address; } diff --git a/core/src/main/java/org/elasticsearch/transport/TransportService.java b/core/src/main/java/org/elasticsearch/transport/TransportService.java index 15164a5d201..1c807553a24 100644 --- a/core/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportService.java @@ -19,6 +19,8 @@ package org.elasticsearch.transport; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction; import org.elasticsearch.cluster.ClusterName; @@ -29,7 +31,6 @@ import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.regex.Regex; @@ -108,7 +109,7 @@ public class TransportService extends AbstractLifecycleComponent { listSetting("transport.tracer.exclude", Arrays.asList("internal:discovery/zen/fd*", TransportLivenessAction.NAME), Function.identity(), Property.Dynamic, Property.NodeScope); - private final ESLogger tracerLog; + private final Logger tracerLog; volatile String[] tracerLogInclude; volatile String[] tracelLogExclude; @@ -205,11 +206,19 @@ public class TransportService extends AbstractLifecycleComponent { @Override public void onRejection(Exception e) { // if we get rejected during node shutdown we don't wanna bubble it up - logger.debug("failed to notify response handler on rejection, action: {}", e, holderToNotify.action()); + logger.debug( + (Supplier) () -> new ParameterizedMessage( + "failed to notify response handler on rejection, action: {}", + holderToNotify.action()), + e); } @Override public void onFailure(Exception e) { - logger.warn("failed to notify response handler on exception, action: {}", e, holderToNotify.action()); + logger.warn( + (Supplier) () -> new ParameterizedMessage( + "failed to notify response handler on exception, action: {}", + holderToNotify.action()), + e); } @Override public void doRun() { @@ -483,11 +492,19 @@ public class TransportService extends AbstractLifecycleComponent { @Override public void onRejection(Exception e) { // if we get rejected during node shutdown we don't wanna bubble it up - logger.debug("failed to notify response handler on rejection, action: {}", e, holderToNotify.action()); + logger.debug( + (Supplier) () -> new ParameterizedMessage( + "failed to notify response handler on rejection, action: {}", + holderToNotify.action()), + e); } @Override public void onFailure(Exception e) { - logger.warn("failed to notify response handler on exception, action: {}", e, holderToNotify.action()); + logger.warn( + (Supplier) () -> new ParameterizedMessage( + "failed to notify response handler on exception, action: {}", + holderToNotify.action()), + e); } @Override protected void doRun() 
throws Exception { @@ -528,7 +545,9 @@ public class TransportService extends AbstractLifecycleComponent { channel.sendResponse(e); } catch (Exception inner) { inner.addSuppressed(e); - logger.warn("failed to notify channel of error message for action [{}]", inner, action); + logger.warn( + (Supplier) () -> new ParameterizedMessage( + "failed to notify channel of error message for action [{}]", action), inner); } } }); @@ -539,7 +558,9 @@ public class TransportService extends AbstractLifecycleComponent { channel.sendResponse(e); } catch (Exception inner) { inner.addSuppressed(e); - logger.warn("failed to notify channel of error message for action [{}]", inner, action); + logger.warn( + (Supplier) () -> new ParameterizedMessage( + "failed to notify channel of error message for action [{}]", action), inner); } } @@ -661,7 +682,9 @@ public class TransportService extends AbstractLifecycleComponent { } protected void traceResponseSent(long requestId, String action, Exception e) { - tracerLog.trace("[{}][{}] sent error response", e, requestId, action); + tracerLog.trace( + (org.apache.logging.log4j.util.Supplier) + () -> new ParameterizedMessage("[{}][{}] sent error response", requestId, action), e); } @Override @@ -941,14 +964,14 @@ public class TransportService extends AbstractLifecycleComponent { } static class DirectResponseChannel implements TransportChannel { - final ESLogger logger; + final Logger logger; final DiscoveryNode localNode; private final String action; private final long requestId; final TransportServiceAdapter adapter; final ThreadPool threadPool; - public DirectResponseChannel(ESLogger logger, DiscoveryNode localNode, String action, long requestId, + public DirectResponseChannel(Logger logger, DiscoveryNode localNode, String action, long requestId, TransportServiceAdapter adapter, ThreadPool threadPool) { this.logger = logger; this.localNode = localNode; @@ -1034,7 +1057,9 @@ public class TransportService extends AbstractLifecycleComponent { try { handler.handleException(rtx); } catch (Exception e) { - logger.error("failed to handle exception for action [{}], handler [{}]", e, action, handler); + logger.error( + (Supplier) () -> new ParameterizedMessage( + "failed to handle exception for action [{}], handler [{}]", action, handler), e); } } diff --git a/core/src/main/java/org/elasticsearch/transport/local/LocalTransport.java b/core/src/main/java/org/elasticsearch/transport/local/LocalTransport.java index 61559442ff3..e556ec9676a 100644 --- a/core/src/main/java/org/elasticsearch/transport/local/LocalTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/local/LocalTransport.java @@ -19,6 +19,8 @@ package org.elasticsearch.transport.local; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -71,9 +73,6 @@ import java.util.concurrent.atomic.AtomicLong; import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; -/** - * - */ public class LocalTransport extends AbstractLifecycleComponent implements Transport { public static final String LOCAL_TRANSPORT_THREAD_NAME_PREFIX = "local_transport"; @@ -306,7 +305,7 @@ public class LocalTransport extends AbstractLifecycleComponent implements Transp }); } } else { - logger.warn("Failed to receive message for action [{}]", e, action); + logger.warn((Supplier) () -> new ParameterizedMessage("Failed 
@@ -355,7 +354,9 @@ public class LocalTransport extends AbstractLifecycleComponent implements Transp transportChannel.sendResponse(e); } catch (Exception inner) { inner.addSuppressed(e); - logger.warn("Failed to send error message back to client for action [{}]", inner, action); + logger.warn( + (Supplier<?>) () -> new ParameterizedMessage( + "Failed to send error message back to client for action [{}]", action), inner); } } } @@ -366,7 +367,9 @@ public class LocalTransport extends AbstractLifecycleComponent implements Transp transportChannel.sendResponse(e); } catch (Exception inner) { inner.addSuppressed(e); - logger.warn("Failed to send error message back to client for action [{}]", inner, action); + logger.warn( + (Supplier<?>) () -> new ParameterizedMessage( + "Failed to send error message back to client for action [{}]", action), inner); } } @@ -414,7 +417,7 @@ public class LocalTransport extends AbstractLifecycleComponent implements Transp try { handler.handleException(rtx); } catch (Exception e) { - logger.error("failed to handle exception response [{}]", e, handler); + logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to handle exception response [{}]", handler), e); } } diff --git a/core/src/main/java/org/elasticsearch/tribe/TribeService.java b/core/src/main/java/org/elasticsearch/tribe/TribeService.java index 3ca80155270..99f6c696e66 100644 --- a/core/src/main/java/org/elasticsearch/tribe/TribeService.java +++ b/core/src/main/java/org/elasticsearch/tribe/TribeService.java @@ -19,6 +19,8 @@ package org.elasticsearch.tribe; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; @@ -274,7 +276,7 @@ public class TribeService extends AbstractLifecycleComponent { otherNode.close(); } catch (Exception inner) { inner.addSuppressed(e); - logger.warn("failed to close node {} on failed start", inner, otherNode); + logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to close node {} on failed start", otherNode), inner); } } if (e instanceof RuntimeException) { @@ -296,7 +298,7 @@ public class TribeService extends AbstractLifecycleComponent { try { node.close(); } catch (Exception e) { - logger.warn("failed to close node {}", e, node); + logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to close node {}", node), e); } } } @@ -320,7 +322,7 @@ public class TribeService extends AbstractLifecycleComponent { event, ClusterStateTaskConfig.build(Priority.NORMAL), executor, - (source, e) -> logger.warn("failed to process [{}]", e, source)); + (source, e) -> logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to process [{}]", source), e)); } } diff --git a/core/src/main/java/org/elasticsearch/watcher/FileWatcher.java b/core/src/main/java/org/elasticsearch/watcher/FileWatcher.java index a6b0bdd8401..8d5c04e7708 100644 --- a/core/src/main/java/org/elasticsearch/watcher/FileWatcher.java +++ b/core/src/main/java/org/elasticsearch/watcher/FileWatcher.java @@ -18,8 +18,8 @@ */ package org.elasticsearch.watcher; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.io.FileSystemUtils; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import java.io.IOException; @@ -38,7 +38,7 @@ public class FileWatcher extends
AbstractResourceWatcher { private FileObserver rootFileObserver; private Path file; - private static final ESLogger logger = Loggers.getLogger(FileWatcher.class); + private static final Logger logger = Loggers.getLogger(FileWatcher.class); /** * Creates new file watcher on the given directory diff --git a/core/src/test/java/org/apache/log4j/Java9HackTests.java b/core/src/test/java/org/apache/log4j/Java9HackTests.java deleted file mode 100644 index e917f1d3060..00000000000 --- a/core/src/test/java/org/apache/log4j/Java9HackTests.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.log4j; - -import org.elasticsearch.test.ESTestCase; - -public class Java9HackTests extends ESTestCase { - public void testJava9Hack() { - assertNotNull(MDC.mdc.tlm != null); - } -} diff --git a/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java b/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java index 934fdae254b..1a11e3f4803 100644 --- a/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java +++ b/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java @@ -69,6 +69,7 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.action.support.replication.TransportReplicationActionTests; import org.elasticsearch.action.termvectors.MultiTermVectorsAction; import org.elasticsearch.action.termvectors.MultiTermVectorsRequest; import org.elasticsearch.action.termvectors.TermVectorsAction; @@ -117,7 +118,6 @@ import static org.hamcrest.Matchers.emptyIterable; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasItem; -import static org.hamcrest.Matchers.instanceOf; @ClusterScope(scope = Scope.SUITE, numClientNodes = 1, minNumDataNodes = 2) public class IndicesRequestIT extends ESIntegTestCase { @@ -638,8 +638,7 @@ public class IndicesRequestIT extends ESIntegTestCase { assertThat("no internal requests intercepted for action [" + action + "]", requests.size(), greaterThan(0)); } for (TransportRequest internalRequest : requests) { - assertThat(internalRequest, instanceOf(IndicesRequest.class)); - IndicesRequest indicesRequest = (IndicesRequest) internalRequest; + IndicesRequest indicesRequest = convertRequest(internalRequest); assertThat(internalRequest.getClass().getName(), indicesRequest.indices(), equalTo(originalRequest.indices())); assertThat(indicesRequest.indicesOptions(), equalTo(originalRequest.indicesOptions())); } @@ -651,14 +650,24 @@ public class IndicesRequestIT extends ESIntegTestCase { List requests = 
consumeTransportRequests(action); assertThat("no internal requests intercepted for action [" + action + "]", requests.size(), greaterThan(0)); for (TransportRequest internalRequest : requests) { - assertThat(internalRequest, instanceOf(IndicesRequest.class)); - for (String index : ((IndicesRequest) internalRequest).indices()) { + IndicesRequest indicesRequest = convertRequest(internalRequest); + for (String index : indicesRequest.indices()) { assertThat(indices, hasItem(index)); } } } } + static IndicesRequest convertRequest(TransportRequest request) { + final IndicesRequest indicesRequest; + if (request instanceof IndicesRequest) { + indicesRequest = (IndicesRequest) request; + } else { + indicesRequest = TransportReplicationActionTests.resolveRequest(request); + } + return indicesRequest; + } + private String randomIndexOrAlias() { String index = randomFrom(indices); if (randomBoolean()) { diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java index f2ed690bb9c..2690dc63bdc 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java @@ -21,12 +21,15 @@ package org.elasticsearch.action.admin.cluster.stats; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; +import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.client.Requests; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.monitor.os.OsStats; import org.elasticsearch.node.Node; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; @@ -181,6 +184,20 @@ public class ClusterStatsIT extends ESIntegTestCase { assertThat(msg, response.nodesStats.getProcess().getMinOpenFileDescriptors(), Matchers.greaterThanOrEqualTo(-1L)); assertThat(msg, response.nodesStats.getProcess().getMaxOpenFileDescriptors(), Matchers.greaterThanOrEqualTo(-1L)); + NodesStatsResponse nodesStatsResponse = client().admin().cluster().prepareNodesStats().setOs(true).get(); + long total = 0; + long free = 0; + long used = 0; + for (NodeStats nodeStats : nodesStatsResponse.getNodes()) { + total += nodeStats.getOs().getMem().getTotal().bytes(); + free += nodeStats.getOs().getMem().getFree().bytes(); + used += nodeStats.getOs().getMem().getUsed().bytes(); + } + assertEquals(msg, free, response.nodesStats.getOs().getMem().getFree().bytes()); + assertEquals(msg, total, response.nodesStats.getOs().getMem().getTotal().bytes()); + assertEquals(msg, used, response.nodesStats.getOs().getMem().getUsed().bytes()); + assertEquals(msg, OsStats.calculatePercentage(used, total), response.nodesStats.getOs().getMem().getUsedPercent()); + assertEquals(msg, OsStats.calculatePercentage(free, total), response.nodesStats.getOs().getMem().getFreePercent()); } public void testAllocatedProcessors() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java b/core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java index 
05e32bbfdde..03cf86397c2 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java @@ -60,14 +60,6 @@ import static org.hamcrest.Matchers.nullValue; public class BulkWithUpdatesIT extends ESIntegTestCase { - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .put("script.default_lang", CustomScriptPlugin.NAME) - .build(); - } - @Override protected Collection> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); @@ -557,6 +549,7 @@ public class BulkWithUpdatesIT extends ESIntegTestCase { " \"script\" : {" + " \"inline\" : \"ctx._source.field2 = 'value2'\"" + " }," + + " \"lang\" : \"" + CustomScriptPlugin.NAME + "\"," + " \"upsert\" : {" + " \"field1\" : \"value1'\"" + " }" + @@ -589,7 +582,9 @@ public class BulkWithUpdatesIT extends ESIntegTestCase { assertThat(bulkResponse.getItems().length, equalTo(3)); assertThat(bulkResponse.getItems()[0].isFailed(), equalTo(false)); assertThat(bulkResponse.getItems()[1].isFailed(), equalTo(false)); - assertThat(bulkResponse.getItems()[2].isFailed(), equalTo(false)); + assertThat(bulkResponse.getItems()[2].isFailed(), equalTo(true)); + assertThat(bulkResponse.getItems()[2].getFailure().getCause().getCause().getMessage(), + equalTo("script_lang not supported [painless]")); client().admin().indices().prepareRefresh("test").get(); diff --git a/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java b/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java index a8699dd3ea7..355daf40683 100644 --- a/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java @@ -141,4 +141,13 @@ public class IndexRequestTests extends ESTestCase { // test negative shard count value not allowed expectThrows(IllegalArgumentException.class, () -> request.waitForActiveShards(ActiveShardCount.from(randomIntBetween(-10, -1)))); } + + public void testAutoGenIdTimestampIsSet() { + IndexRequest request = new IndexRequest("index", "type"); + request.process(null, true, "index"); + assertTrue("expected > 0 but got: " + request.getAutoGeneratedTimestamp(), request.getAutoGeneratedTimestamp() > 0); + request = new IndexRequest("index", "type", "1"); + request.process(null, true, "index"); + assertEquals(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, request.getAutoGeneratedTimestamp()); + } } diff --git a/core/src/test/java/org/elasticsearch/action/support/AutoCreateIndexTests.java b/core/src/test/java/org/elasticsearch/action/support/AutoCreateIndexTests.java index 9572a2df652..8a45ca47535 100644 --- a/core/src/test/java/org/elasticsearch/action/support/AutoCreateIndexTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/AutoCreateIndexTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.test.ESTestCase; @@ -35,10 +36,12 @@ public class AutoCreateIndexTests extends ESTestCase { public void testParseFailed() { try { - new 
AutoCreateIndex(Settings.builder().put("action.auto_create_index", ",,,").build(), new IndexNameExpressionResolver(Settings.EMPTY)); + Settings settings = Settings.builder().put("action.auto_create_index", ",,,").build(); + newAutoCreateIndex(settings); fail("initialization should have failed"); } catch (IllegalArgumentException ex) { - assertEquals("Can't parse [,,,] for setting [action.auto_create_index] must be either [true, false, or a comma separated list of index patterns]", ex.getMessage()); + assertEquals("Can't parse [,,,] for setting [action.auto_create_index] must be either [true, false, or a " + + "comma separated list of index patterns]", ex.getMessage()); } } @@ -46,46 +49,51 @@ public class AutoCreateIndexTests extends ESTestCase { String prefix = randomFrom("+", "-"); Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), prefix).build(); try { - new AutoCreateIndex(settings, new IndexNameExpressionResolver(settings)); + newAutoCreateIndex(settings); fail("initialization should have failed"); } catch(IllegalArgumentException ex) { - assertEquals("Can't parse [" + prefix + "] for setting [action.auto_create_index] must contain an index name after [" + prefix + "]", ex.getMessage()); + assertEquals("Can't parse [" + prefix + "] for setting [action.auto_create_index] must contain an index name after [" + + prefix + "]", ex.getMessage()); } } public void testAutoCreationDisabled() { Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), false).build(); - AutoCreateIndex autoCreateIndex = new AutoCreateIndex(settings, new IndexNameExpressionResolver(Settings.EMPTY)); + AutoCreateIndex autoCreateIndex = newAutoCreateIndex(settings); assertThat(autoCreateIndex.shouldAutoCreate(randomAsciiOfLengthBetween(1, 10), buildClusterState()), equalTo(false)); } public void testAutoCreationEnabled() { Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), true).build(); - AutoCreateIndex autoCreateIndex = new AutoCreateIndex(settings, new IndexNameExpressionResolver(Settings.EMPTY)); + AutoCreateIndex autoCreateIndex = newAutoCreateIndex(settings); assertThat(autoCreateIndex.shouldAutoCreate(randomAsciiOfLengthBetween(1, 10), buildClusterState()), equalTo(true)); } public void testDefaultAutoCreation() { - AutoCreateIndex autoCreateIndex = new AutoCreateIndex(Settings.EMPTY, new IndexNameExpressionResolver(Settings.EMPTY)); + AutoCreateIndex autoCreateIndex = newAutoCreateIndex(Settings.EMPTY); assertThat(autoCreateIndex.shouldAutoCreate(randomAsciiOfLengthBetween(1, 10), buildClusterState()), equalTo(true)); } public void testExistingIndex() { - Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), randomFrom(true, false, randomAsciiOfLengthBetween(7, 10))).build(); - AutoCreateIndex autoCreateIndex = new AutoCreateIndex(settings, new IndexNameExpressionResolver(Settings.EMPTY)); - assertThat(autoCreateIndex.shouldAutoCreate(randomFrom("index1", "index2", "index3"), buildClusterState("index1", "index2", "index3")), equalTo(false)); + Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), randomFrom(true, false, + randomAsciiOfLengthBetween(7, 10))).build(); + AutoCreateIndex autoCreateIndex = newAutoCreateIndex(settings); + assertThat(autoCreateIndex.shouldAutoCreate(randomFrom("index1", "index2", "index3"), + buildClusterState("index1", "index2", "index3")), equalTo(false)); } public void 
testDynamicMappingDisabled() { - Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), randomFrom(true, randomAsciiOfLengthBetween(1, 10))) + Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), randomFrom(true, + randomAsciiOfLengthBetween(1, 10))) .put(MapperService.INDEX_MAPPER_DYNAMIC_SETTING.getKey(), false).build(); - AutoCreateIndex autoCreateIndex = new AutoCreateIndex(settings, new IndexNameExpressionResolver(Settings.EMPTY)); + AutoCreateIndex autoCreateIndex = newAutoCreateIndex(settings); assertThat(autoCreateIndex.shouldAutoCreate(randomAsciiOfLengthBetween(1, 10), buildClusterState()), equalTo(false)); } public void testAutoCreationPatternEnabled() { - Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), randomFrom("+index*", "index*")).build(); - AutoCreateIndex autoCreateIndex = new AutoCreateIndex(settings, new IndexNameExpressionResolver(settings)); + Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), randomFrom("+index*", "index*")) + .build(); + AutoCreateIndex autoCreateIndex = newAutoCreateIndex(settings); ClusterState clusterState = ClusterState.builder(new ClusterName("test")).metaData(MetaData.builder()).build(); assertThat(autoCreateIndex.shouldAutoCreate("index" + randomAsciiOfLengthBetween(1, 5), clusterState), equalTo(true)); assertThat(autoCreateIndex.shouldAutoCreate("does_not_match" + randomAsciiOfLengthBetween(1, 5), clusterState), equalTo(false)); @@ -93,7 +101,7 @@ public class AutoCreateIndexTests extends ESTestCase { public void testAutoCreationPatternDisabled() { Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), "-index*").build(); - AutoCreateIndex autoCreateIndex = new AutoCreateIndex(settings, new IndexNameExpressionResolver(settings)); + AutoCreateIndex autoCreateIndex = newAutoCreateIndex(settings); ClusterState clusterState = ClusterState.builder(new ClusterName("test")).metaData(MetaData.builder()).build(); assertThat(autoCreateIndex.shouldAutoCreate("index" + randomAsciiOfLengthBetween(1, 5), clusterState), equalTo(false)); //default is false when patterns are specified @@ -101,8 +109,9 @@ public class AutoCreateIndexTests extends ESTestCase { } public void testAutoCreationMultiplePatternsWithWildcards() { - Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), randomFrom("+test*,-index*", "test*,-index*")).build(); - AutoCreateIndex autoCreateIndex = new AutoCreateIndex(settings, new IndexNameExpressionResolver(settings)); + Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), + randomFrom("+test*,-index*", "test*,-index*")).build(); + AutoCreateIndex autoCreateIndex = newAutoCreateIndex(settings); ClusterState clusterState = ClusterState.builder(new ClusterName("test")).metaData(MetaData.builder()).build(); assertThat(autoCreateIndex.shouldAutoCreate("index" + randomAsciiOfLengthBetween(1, 5), clusterState), equalTo(false)); assertThat(autoCreateIndex.shouldAutoCreate("test" + randomAsciiOfLengthBetween(1, 5), clusterState), equalTo(true)); @@ -111,7 +120,7 @@ public class AutoCreateIndexTests extends ESTestCase { public void testAutoCreationMultiplePatternsNoWildcards() { Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), "+test1,-index1").build(); - AutoCreateIndex autoCreateIndex = new 
AutoCreateIndex(settings, new IndexNameExpressionResolver(settings)); + AutoCreateIndex autoCreateIndex = newAutoCreateIndex(settings); ClusterState clusterState = ClusterState.builder(new ClusterName("test")).metaData(MetaData.builder()).build(); assertThat(autoCreateIndex.shouldAutoCreate("test1", clusterState), equalTo(true)); assertThat(autoCreateIndex.shouldAutoCreate("index" + randomAsciiOfLengthBetween(1, 5), clusterState), equalTo(false)); @@ -121,7 +130,7 @@ public class AutoCreateIndexTests extends ESTestCase { public void testAutoCreationMultipleIndexNames() { Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), "test1,test2").build(); - AutoCreateIndex autoCreateIndex = new AutoCreateIndex(settings, new IndexNameExpressionResolver(settings)); + AutoCreateIndex autoCreateIndex = newAutoCreateIndex(settings); ClusterState clusterState = ClusterState.builder(new ClusterName("test")).metaData(MetaData.builder()).build(); assertThat(autoCreateIndex.shouldAutoCreate("test1", clusterState), equalTo(true)); assertThat(autoCreateIndex.shouldAutoCreate("test2", clusterState), equalTo(true)); @@ -129,19 +138,51 @@ public class AutoCreateIndexTests extends ESTestCase { } public void testAutoCreationConflictingPatternsFirstWins() { - Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), "+test1,-test1,-test2,+test2").build(); - AutoCreateIndex autoCreateIndex = new AutoCreateIndex(settings, new IndexNameExpressionResolver(settings)); + Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), + "+test1,-test1,-test2,+test2").build(); + AutoCreateIndex autoCreateIndex = newAutoCreateIndex(settings); ClusterState clusterState = ClusterState.builder(new ClusterName("test")).metaData(MetaData.builder()).build(); assertThat(autoCreateIndex.shouldAutoCreate("test1", clusterState), equalTo(true)); assertThat(autoCreateIndex.shouldAutoCreate("test2", clusterState), equalTo(false)); assertThat(autoCreateIndex.shouldAutoCreate("does_not_match" + randomAsciiOfLengthBetween(1, 5), clusterState), equalTo(false)); } + public void testUpdate() { + boolean value = randomBoolean(); + Settings settings; + if (value && randomBoolean()) { + settings = Settings.EMPTY; + } else { + settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), value).build(); + } + + ClusterSettings clusterSettings = new ClusterSettings(settings, + ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + AutoCreateIndex autoCreateIndex = new AutoCreateIndex(settings, clusterSettings, new IndexNameExpressionResolver(settings)); + assertThat(autoCreateIndex.getAutoCreate().isAutoCreateIndex(), equalTo(value)); + + Settings newSettings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), !value).build(); + clusterSettings.applySettings(newSettings); + assertThat(autoCreateIndex.getAutoCreate().isAutoCreateIndex(), equalTo(!value)); + + newSettings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), "logs-*").build(); + clusterSettings.applySettings(newSettings); + assertThat(autoCreateIndex.getAutoCreate().isAutoCreateIndex(), equalTo(true)); + assertThat(autoCreateIndex.getAutoCreate().getExpressions().size(), equalTo(1)); + assertThat(autoCreateIndex.getAutoCreate().getExpressions().get(0).v1(), equalTo("logs-*")); + } + private static ClusterState buildClusterState(String... 
indices) { MetaData.Builder metaData = MetaData.builder(); for (String index : indices) { metaData.put(IndexMetaData.builder(index).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1)); } - return ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).build(); + return ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metaData(metaData).build(); + } + + private AutoCreateIndex newAutoCreateIndex(Settings settings) { + return new AutoCreateIndex(settings, new ClusterSettings(settings, + ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), new IndexNameExpressionResolver(settings)); } } diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java index 23cf7be8cee..b5edc1b53c5 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.action.support.replication; +import org.apache.logging.log4j.Logger; import org.apache.lucene.index.CorruptIndexException; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; @@ -34,7 +35,6 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.shard.IndexShardNotStartedException; @@ -491,7 +491,7 @@ public class ReplicationOperationTests extends ESTestCase { public TestReplicationOperation(Request request, Primary primary, ActionListener listener, boolean executeOnReplicas, - Replicas replicas, Supplier clusterStateSupplier, ESLogger logger, String opType) { + Replicas replicas, Supplier clusterStateSupplier, Logger logger, String opType) { super(request, primary, listener, executeOnReplicas, replicas, clusterStateSupplier, logger, opType); } } diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index 9b896603c66..bda117642a0 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.transport.NoNodeAvailableException; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ESAllocationTestCase; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -36,6 +37,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; 
import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; @@ -47,21 +49,25 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.EngineClosedException; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardClosedException; +import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardNotFoundException; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.cluster.ESAllocationTestCase; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.CapturingTransport; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportResponseOptions; import org.elasticsearch.transport.TransportService; @@ -75,12 +81,12 @@ import java.io.IOException; import java.util.Arrays; import java.util.HashSet; import java.util.List; +import java.util.Locale; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Consumer; import java.util.stream.Collectors; import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.state; @@ -93,12 +99,32 @@ import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyInt; +import static org.mockito.Matchers.anyLong; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class TransportReplicationActionTests extends ESTestCase { + /** + * takes a request that was sent by a {@link TransportReplicationAction} and captured + * and returns the underlying request if it's wrapped or the original (cast to the expected type). + * + * This will throw a {@link ClassCastException} if the request is of the wrong type. 
+ */ + public static <R extends ReplicationRequest> R resolveRequest(TransportRequest requestOrWrappedRequest) { + if (requestOrWrappedRequest instanceof TransportReplicationAction.ConcreteShardRequest) { + requestOrWrappedRequest = ((TransportReplicationAction.ConcreteShardRequest<?>) requestOrWrappedRequest).getRequest(); + } + return (R) requestOrWrappedRequest; + } + private static ThreadPool threadPool; private ClusterService clusterService; @@ -169,12 +195,14 @@ public class TransportReplicationActionTests extends ESTestCase { reroutePhase.run(); assertListenerThrows("failed to timeout on retryable block", listener, ClusterBlockException.class); assertPhase(task, "failed"); + assertFalse(request.isRetrySet.get()); listener = new PlainActionFuture<>(); - reroutePhase = action.new ReroutePhase(task, new Request(), listener); + reroutePhase = action.new ReroutePhase(task, request = new Request(), listener); reroutePhase.run(); assertFalse("primary phase should wait on retryable block", listener.isDone()); assertPhase(task, "waiting_for_retry"); + assertTrue(request.isRetrySet.get()); block = ClusterBlocks.builder() .addGlobalBlock(new ClusterBlock(1, "non retryable", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); @@ -204,6 +232,7 @@ public class TransportReplicationActionTests extends ESTestCase { reroutePhase.run(); assertListenerThrows("unassigned primary didn't cause a timeout", listener, UnavailableShardsException.class); assertPhase(task, "failed"); + assertTrue(request.isRetrySet.get()); request = new Request(shardId); listener = new PlainActionFuture<>(); @@ -211,6 +240,7 @@ public class TransportReplicationActionTests extends ESTestCase { reroutePhase.run(); assertFalse("unassigned primary didn't cause a retry", listener.isDone()); assertPhase(task, "waiting_for_retry"); + assertTrue(request.isRetrySet.get()); setState(clusterService, state(index, true, ShardRoutingState.STARTED)); logger.debug("--> primary assigned state:\n{}", clusterService.state().prettyPrint()); @@ -249,12 +279,14 @@ public class TransportReplicationActionTests extends ESTestCase { Action.ReroutePhase reroutePhase = action.new ReroutePhase(null, request, listener); reroutePhase.run(); assertListenerThrows("cluster state too old didn't cause a timeout", listener, UnavailableShardsException.class); + assertTrue(request.isRetrySet.compareAndSet(true, false)); request = new Request(shardId).routedBasedOnClusterVersion(clusterService.state().version() + 1); listener = new PlainActionFuture<>(); reroutePhase = action.new ReroutePhase(null, request, listener); reroutePhase.run(); assertFalse("cluster state too old didn't cause a retry", listener.isDone()); + assertTrue(request.isRetrySet.get()); // finish relocation ShardRouting relocationTarget = clusterService.state().getRoutingTable().shardRoutingTable(shardId) @@ -290,11 +322,14 @@ public class TransportReplicationActionTests extends ESTestCase { reroutePhase.run(); assertListenerThrows("must throw index not found exception", listener, IndexNotFoundException.class); assertPhase(task, "failed"); + assertTrue(request.isRetrySet.get()); request = new Request(new ShardId(index, "_na_", 10)).timeout("1ms"); listener = new PlainActionFuture<>(); reroutePhase = action.new ReroutePhase(null, request, listener); reroutePhase.run(); assertListenerThrows("must throw shard not found exception", listener, ShardNotFoundException.class); + assertFalse(request.isRetrySet.get()); //TODO I'd have expected this to be true but we fail too early?
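// ---------------------------------------------------------------------------
// [Editorial aside -- not part of the patch] Usage sketch for the
// resolveRequest(...) helper introduced at the top of this hunk: it unwraps
// the new ConcreteShardRequest envelope (which pairs a replication request
// with its target allocation id) back to the inner request. Only the
// constructor, getRequest(), and getTargetAllocationID() shown in this diff
// are used; the "idx" index name and "_aid_" allocation id are made up:
Request inner = new Request(new ShardId("idx", "_na_", 0));
TransportRequest wrapped = new TransportReplicationAction.ConcreteShardRequest<>(inner, "_aid_");
Request resolved = TransportReplicationActionTests.resolveRequest(wrapped);
assert resolved == inner;
assert "_aid_".equals(((TransportReplicationAction.ConcreteShardRequest<?>) wrapped).getTargetAllocationID());
// ---------------------------------------------------------------------------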
+ } public void testStalePrimaryShardOnReroute() throws InterruptedException { @@ -319,6 +354,7 @@ public class TransportReplicationActionTests extends ESTestCase { assertThat(capturedRequests, arrayWithSize(1)); assertThat(capturedRequests[0].action, equalTo("testAction[p]")); assertPhase(task, "waiting_on_primary"); + assertFalse(request.isRetrySet.get()); transport.handleRemoteError(capturedRequests[0].requestId, randomRetryPrimaryException(shardId)); @@ -380,6 +416,7 @@ public class TransportReplicationActionTests extends ESTestCase { assertThat(capturedRequests.get(0).action, equalTo("testAction")); assertPhase(task, "rerouted"); } + assertFalse(request.isRetrySet.get()); assertIndexShardUninitialized(); } @@ -400,7 +437,7 @@ public class TransportReplicationActionTests extends ESTestCase { isRelocated.set(true); executeOnPrimary = false; } - action.new AsyncPrimaryAction(request, createTransportChannel(listener), task) { + action.new AsyncPrimaryAction(request, primaryShard.allocationId().getId(), createTransportChannel(listener), task) { @Override protected ReplicationOperation createReplicatedOperation(Request request, ActionListener actionListener, Action.PrimaryShardReference primaryShardReference, @@ -419,6 +456,7 @@ public class TransportReplicationActionTests extends ESTestCase { assertTrue(listener.isDone()); listener.get(); assertPhase(task, "finished"); + assertFalse(request.isRetrySet.get()); } else { assertFalse(executed.get()); assertIndexShardCounter(0); // it should have been freed. @@ -432,6 +470,7 @@ public class TransportReplicationActionTests extends ESTestCase { assertTrue(listener.isDone()); listener.get(); assertPhase(task, "finished"); + assertFalse(request.isRetrySet.get()); } } @@ -439,7 +478,8 @@ public class TransportReplicationActionTests extends ESTestCase { final String index = "test"; final ShardId shardId = new ShardId(index, "_na_", 0); ClusterState state = state(index, true, ShardRoutingState.RELOCATING); - String primaryTargetNodeId = state.getRoutingTable().shardRoutingTable(shardId).primaryShard().relocatingNodeId(); + final ShardRouting primaryShard = state.getRoutingTable().shardRoutingTable(shardId).primaryShard(); + String primaryTargetNodeId = primaryShard.relocatingNodeId(); // simulate execution of the primary phase on the relocation target node state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(primaryTargetNodeId)).build(); setState(clusterService, state); @@ -447,7 +487,7 @@ public class TransportReplicationActionTests extends ESTestCase { PlainActionFuture listener = new PlainActionFuture<>(); ReplicationTask task = maybeTask(); AtomicBoolean executed = new AtomicBoolean(); - action.new AsyncPrimaryAction(request, createTransportChannel(listener), task) { + action.new AsyncPrimaryAction(request, primaryShard.allocationId().getRelocationId(), createTransportChannel(listener), task) { @Override protected ReplicationOperation createReplicatedOperation(Request request, ActionListener actionListener, Action.PrimaryShardReference primaryShardReference, @@ -460,9 +500,15 @@ public class TransportReplicationActionTests extends ESTestCase { } }; } + + @Override + public void onFailure(Exception e) { + throw new RuntimeException(e); + } }.run(); assertThat(executed.get(), equalTo(true)); assertPhase(task, "finished"); + assertFalse(request.isRetrySet.get()); } public void testPrimaryReference() throws Exception { @@ -582,7 +628,9 @@ public class TransportReplicationActionTests extends ESTestCase { state = 
ClusterState.builder(state).metaData(metaData).build(); setState(clusterService, state); AtomicBoolean executed = new AtomicBoolean(); - action.new AsyncPrimaryAction(new Request(shardId), createTransportChannel(new PlainActionFuture<>()), null) { + ShardRouting primaryShard = state.routingTable().shardRoutingTable(shardId).primaryShard(); + action.new AsyncPrimaryAction(new Request(shardId), primaryShard.allocationId().getId(), + createTransportChannel(new PlainActionFuture<>()), null) { @Override protected ReplicationOperation createReplicatedOperation(Request request, ActionListener actionListener, Action.PrimaryShardReference primaryShardReference, @@ -599,8 +647,10 @@ public class TransportReplicationActionTests extends ESTestCase { final String index = "test"; final ShardId shardId = new ShardId(index, "_na_", 0); // no replica, we only want to test on primary - setState(clusterService, state(index, true, ShardRoutingState.STARTED)); + final ClusterState state = state(index, true, ShardRoutingState.STARTED); + setState(clusterService, state); logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + final ShardRouting primaryShard = state.routingTable().shardRoutingTable(shardId).primaryShard(); Request request = new Request(shardId); PlainActionFuture listener = new PlainActionFuture<>(); ReplicationTask task = maybeTask(); @@ -608,7 +658,7 @@ public class TransportReplicationActionTests extends ESTestCase { final boolean throwExceptionOnCreation = i == 1; final boolean throwExceptionOnRun = i == 2; final boolean respondWithError = i == 3; - action.new AsyncPrimaryAction(request, createTransportChannel(listener), task) { + action.new AsyncPrimaryAction(request, primaryShard.allocationId().getId(), createTransportChannel(listener), task) { @Override protected ReplicationOperation createReplicatedOperation(Request request, ActionListener actionListener, Action.PrimaryShardReference primaryShardReference, @@ -652,8 +702,9 @@ public class TransportReplicationActionTests extends ESTestCase { public void testReplicasCounter() throws Exception { final ShardId shardId = new ShardId("test", "_na_", 0); - setState(clusterService, state(shardId.getIndexName(), true, - ShardRoutingState.STARTED, ShardRoutingState.STARTED)); + final ClusterState state = state(shardId.getIndexName(), true, ShardRoutingState.STARTED, ShardRoutingState.STARTED); + setState(clusterService, state); + final ShardRouting replicaRouting = state.getRoutingTable().shardRoutingTable(shardId).replicaShards().get(0); boolean throwException = randomBoolean(); final ReplicationTask task = maybeTask(); Action action = new Action(Settings.EMPTY, "testActionWithExceptions", transportService, clusterService, threadPool) { @@ -669,7 +720,9 @@ public class TransportReplicationActionTests extends ESTestCase { }; final Action.ReplicaOperationTransportHandler replicaOperationTransportHandler = action.new ReplicaOperationTransportHandler(); try { - replicaOperationTransportHandler.messageReceived(new Request().setShardId(shardId), + replicaOperationTransportHandler.messageReceived( + new TransportReplicationAction.ConcreteShardRequest<>( + new Request().setShardId(shardId), replicaRouting.allocationId().getId()), createTransportChannel(new PlainActionFuture<>()), task); } catch (ElasticsearchException e) { assertThat(e.getMessage(), containsString("simulated")); @@ -711,6 +764,111 @@ public class TransportReplicationActionTests extends ESTestCase { 
assertEquals(ActiveShardCount.from(requestWaitForActiveShards), request.waitForActiveShards()); } + /** test that a primary request is rejected if it arrives at a shard with a wrong allocation id */ + public void testPrimaryActionRejectsWrongAid() throws Exception { + final String index = "test"; + final ShardId shardId = new ShardId(index, "_na_", 0); + setState(clusterService, state(index, true, ShardRoutingState.STARTED)); + PlainActionFuture listener = new PlainActionFuture<>(); + Request request = new Request(shardId).timeout("1ms"); + action.new PrimaryOperationTransportHandler().messageReceived( + new TransportReplicationAction.ConcreteShardRequest<>(request, "_not_a_valid_aid_"), + createTransportChannel(listener), maybeTask() + ); + try { + listener.get(); + fail("using a wrong aid didn't fail the operation"); + } catch (ExecutionException execException) { + Throwable throwable = execException.getCause(); + logger.debug("got exception:" , throwable); + assertTrue(throwable.getClass() + " is not a retry exception", action.retryPrimaryException(throwable)); + } + } + + /** test that a replica request is rejected if it arrives at a shard with a wrong allocation id */ + public void testReplicaActionRejectsWrongAid() throws Exception { + final String index = "test"; + final ShardId shardId = new ShardId(index, "_na_", 0); + ClusterState state = state(index, false, ShardRoutingState.STARTED, ShardRoutingState.STARTED); + final ShardRouting replica = state.routingTable().shardRoutingTable(shardId).replicaShards().get(0); + // simulate execution of the node holding the replica + state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(replica.currentNodeId())).build(); + setState(clusterService, state); + + PlainActionFuture listener = new PlainActionFuture<>(); + Request request = new Request(shardId).timeout("1ms"); + action.new ReplicaOperationTransportHandler().messageReceived( + new TransportReplicationAction.ConcreteShardRequest<>(request, "_not_a_valid_aid_"), + createTransportChannel(listener), maybeTask() + ); + try { + listener.get(); + fail("using a wrong aid didn't fail the operation"); + } catch (ExecutionException execException) { + Throwable throwable = execException.getCause(); + if (action.retryPrimaryException(throwable) == false) { + throw new AssertionError("thrown exception is not retriable", throwable); + } + assertThat(throwable.getMessage(), containsString("_not_a_valid_aid_")); + } + } + + /** + * test throwing a {@link org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnReplicaException} + * causes a retry + */ + public void testRetryOnReplica() throws Exception { + final ShardId shardId = new ShardId("test", "_na_", 0); + ClusterState state = state(shardId.getIndexName(), true, ShardRoutingState.STARTED, ShardRoutingState.STARTED); + final ShardRouting replica = state.getRoutingTable().shardRoutingTable(shardId).replicaShards().get(0); + // simulate execution of the node holding the replica + state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(replica.currentNodeId())).build(); + setState(clusterService, state); + AtomicBoolean throwException = new AtomicBoolean(true); + final ReplicationTask task = maybeTask(); + Action action = new Action(Settings.EMPTY, "testActionWithExceptions", transportService, clusterService, threadPool) { + @Override + protected ReplicaResult shardOperationOnReplica(Request request) { + assertPhase(task, "replica"); + if 
(throwException.get()) { + throw new RetryOnReplicaException(shardId, "simulation"); + } + return new ReplicaResult(); + } + }; + final Action.ReplicaOperationTransportHandler replicaOperationTransportHandler = action.new ReplicaOperationTransportHandler(); + final PlainActionFuture listener = new PlainActionFuture<>(); + final Request request = new Request().setShardId(shardId); + request.primaryTerm(state.metaData().getIndexSafe(shardId.getIndex()).primaryTerm(shardId.id())); + replicaOperationTransportHandler.messageReceived( + new TransportReplicationAction.ConcreteShardRequest<>(request, replica.allocationId().getId()), + createTransportChannel(listener), task); + if (listener.isDone()) { + listener.get(); // fail with the exception if there + fail("listener shouldn't be done"); + } + + // no retry yet + List capturedRequests = + transport.getCapturedRequestsByTargetNodeAndClear().get(replica.currentNodeId()); + assertThat(capturedRequests, nullValue()); + + // release the waiting + throwException.set(false); + setState(clusterService, state); + + capturedRequests = transport.getCapturedRequestsByTargetNodeAndClear().get(replica.currentNodeId()); + assertThat(capturedRequests, notNullValue()); + assertThat(capturedRequests.size(), equalTo(1)); + final CapturingTransport.CapturedRequest capturedRequest = capturedRequests.get(0); + assertThat(capturedRequest.action, equalTo("testActionWithExceptions[r]")); + assertThat(capturedRequest.request, instanceOf(TransportReplicationAction.ConcreteShardRequest.class)); + assertThat(((TransportReplicationAction.ConcreteShardRequest) capturedRequest.request).getRequest(), equalTo(request)); + assertThat(((TransportReplicationAction.ConcreteShardRequest) capturedRequest.request).getTargetAllocationID(), + equalTo(replica.allocationId().getId())); + } + + private void assertIndexShardCounter(int expected) { assertThat(count.get(), equalTo(expected)); } @@ -745,6 +903,7 @@ public class TransportReplicationActionTests extends ESTestCase { public static class Request extends ReplicationRequest { public AtomicBoolean processedOnPrimary = new AtomicBoolean(); public AtomicInteger processedOnReplicas = new AtomicInteger(); + public AtomicBoolean isRetrySet = new AtomicBoolean(false); public Request() { } @@ -766,6 +925,12 @@ public class TransportReplicationActionTests extends ESTestCase { public void readFrom(StreamInput in) throws IOException { super.readFrom(in); } + + @Override + public void onRetry() { + super.onRetry(); + isRetrySet.set(true); + } } static class Response extends ReplicationResponse { @@ -776,7 +941,7 @@ public class TransportReplicationActionTests extends ESTestCase { Action(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) { - super(settings, actionName, transportService, clusterService, null, threadPool, + super(settings, actionName, transportService, clusterService, mockIndicesService(clusterService), threadPool, new ShardStateAction(settings, clusterService, transportService, null, null, threadPool), new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(Settings.EMPTY), Request::new, Request::new, ThreadPool.Names.SAME); @@ -804,43 +969,76 @@ public class TransportReplicationActionTests extends ESTestCase { protected boolean resolveIndex() { return false; } + } - @Override - protected void acquirePrimaryShardReference(ShardId shardId, ActionListener onReferenceAcquired) { + final IndicesService mockIndicesService(ClusterService 
clusterService) { + final IndicesService indicesService = mock(IndicesService.class); + when(indicesService.indexServiceSafe(any(Index.class))).then(invocation -> { + Index index = (Index)invocation.getArguments()[0]; + final ClusterState state = clusterService.state(); + final IndexMetaData indexSafe = state.metaData().getIndexSafe(index); + return mockIndexService(indexSafe, clusterService); + }); + when(indicesService.indexService(any(Index.class))).then(invocation -> { + Index index = (Index) invocation.getArguments()[0]; + final ClusterState state = clusterService.state(); + if (state.metaData().hasIndex(index.getName())) { + final IndexMetaData indexSafe = state.metaData().getIndexSafe(index); + return mockIndexService(clusterService.state().metaData().getIndexSafe(index), clusterService); + } else { + return null; + } + }); + return indicesService; + } + + final IndexService mockIndexService(final IndexMetaData indexMetaData, ClusterService clusterService) { + final IndexService indexService = mock(IndexService.class); + when(indexService.getShard(anyInt())).then(invocation -> { + int shard = (Integer) invocation.getArguments()[0]; + final ShardId shardId = new ShardId(indexMetaData.getIndex(), shard); + if (shard > indexMetaData.getNumberOfShards()) { + throw new ShardNotFoundException(shardId); + } + return mockIndexShard(shardId, clusterService); + }); + return indexService; + } + + private IndexShard mockIndexShard(ShardId shardId, ClusterService clusterService) { + final IndexShard indexShard = mock(IndexShard.class); + doAnswer(invocation -> { + ActionListener callback = (ActionListener) invocation.getArguments()[0]; count.incrementAndGet(); - PrimaryShardReference primaryShardReference = new PrimaryShardReference(null, null) { - @Override - public boolean isRelocated() { - return isRelocated.get(); - } - - @Override - public void failShard(String reason, @Nullable Exception e) { - throw new UnsupportedOperationException(); - } - - @Override - public ShardRouting routingEntry() { - ShardRouting shardRouting = clusterService.state().getRoutingTable() - .shardRoutingTable(shardId).primaryShard(); - assert shardRouting != null; - return shardRouting; - } - - @Override - public void close() { - count.decrementAndGet(); - } - }; - - onReferenceAcquired.onResponse(primaryShardReference); - } - - @Override - protected void acquireReplicaOperationLock(ShardId shardId, long primaryTerm, ActionListener onLockAcquired) { + callback.onResponse(count::decrementAndGet); + return null; + }).when(indexShard).acquirePrimaryOperationLock(any(ActionListener.class), anyString()); + doAnswer(invocation -> { + long term = (Long)invocation.getArguments()[0]; + ActionListener callback = (ActionListener) invocation.getArguments()[1]; + final long primaryTerm = indexShard.getPrimaryTerm(); + if (term < primaryTerm) { + throw new IllegalArgumentException(String.format(Locale.ROOT, "%s operation term [%d] is too old (current [%d])", + shardId, term, primaryTerm)); + } count.incrementAndGet(); - onLockAcquired.onResponse(count::decrementAndGet); - } + callback.onResponse(count::decrementAndGet); + return null; + }).when(indexShard).acquireReplicaOperationLock(anyLong(), any(ActionListener.class), anyString()); + when(indexShard.routingEntry()).thenAnswer(invocationOnMock -> { + final ClusterState state = clusterService.state(); + final RoutingNode node = state.getRoutingNodes().node(state.nodes().getLocalNodeId()); + final ShardRouting routing = node.getByShardId(shardId); + if (routing == null) { + throw 
new ShardNotFoundException(shardId, "shard is no longer assigned to current node"); + } + return routing; + }); + when(indexShard.state()).thenAnswer(invocationOnMock -> isRelocated.get() ? IndexShardState.RELOCATED : IndexShardState.STARTED); + doThrow(new AssertionError("failed shard is not supported")).when(indexShard).failShard(anyString(), any(Exception.class)); + when(indexShard.getPrimaryTerm()).thenAnswer(i -> + clusterService.state().metaData().getIndexSafe(shardId.getIndex()).primaryTerm(shardId.id())); + return indexShard; } class NoopReplicationOperation extends ReplicationOperation { @@ -858,11 +1056,6 @@ public class TransportReplicationActionTests extends ESTestCase { * Transport channel that is needed for replica operation testing. */ public TransportChannel createTransportChannel(final PlainActionFuture listener) { - return createTransportChannel(listener, error -> { - }); - } - - public TransportChannel createTransportChannel(final PlainActionFuture listener, Consumer consumer) { return new TransportChannel() { @Override @@ -887,7 +1080,6 @@ public class TransportReplicationActionTests extends ESTestCase { @Override public void sendResponse(Exception exception) throws IOException { - consumer.accept(exception); listener.onFailure(exception); } diff --git a/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java b/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java index a98433a1007..7b606ee4159 100644 --- a/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java @@ -55,7 +55,7 @@ public class UpdateRequestTests extends ESTestCase { assertThat(script, notNullValue()); assertThat(script.getScript(), equalTo("script1")); assertThat(script.getType(), equalTo(ScriptType.INLINE)); - assertThat(script.getLang(), nullValue()); + assertThat(script.getLang(), equalTo(Script.DEFAULT_SCRIPT_LANG)); Map params = script.getParams(); assertThat(params, nullValue()); @@ -67,7 +67,7 @@ public class UpdateRequestTests extends ESTestCase { assertThat(script, notNullValue()); assertThat(script.getScript(), equalTo("script1")); assertThat(script.getType(), equalTo(ScriptType.INLINE)); - assertThat(script.getLang(), nullValue()); + assertThat(script.getLang(), equalTo(Script.DEFAULT_SCRIPT_LANG)); params = script.getParams(); assertThat(params, nullValue()); @@ -79,7 +79,7 @@ public class UpdateRequestTests extends ESTestCase { assertThat(script, notNullValue()); assertThat(script.getScript(), equalTo("script1")); assertThat(script.getType(), equalTo(ScriptType.INLINE)); - assertThat(script.getLang(), nullValue()); + assertThat(script.getLang(), equalTo(Script.DEFAULT_SCRIPT_LANG)); params = script.getParams(); assertThat(params, notNullValue()); assertThat(params.size(), equalTo(1)); @@ -92,7 +92,7 @@ public class UpdateRequestTests extends ESTestCase { assertThat(script, notNullValue()); assertThat(script.getScript(), equalTo("script1")); assertThat(script.getType(), equalTo(ScriptType.INLINE)); - assertThat(script.getLang(), nullValue()); + assertThat(script.getLang(), equalTo(Script.DEFAULT_SCRIPT_LANG)); params = script.getParams(); assertThat(params, notNullValue()); assertThat(params.size(), equalTo(1)); @@ -107,7 +107,7 @@ public class UpdateRequestTests extends ESTestCase { assertThat(script, notNullValue()); assertThat(script.getScript(), equalTo("script1")); assertThat(script.getType(), equalTo(ScriptType.INLINE)); - 
assertThat(script.getLang(), nullValue()); + assertThat(script.getLang(), equalTo(Script.DEFAULT_SCRIPT_LANG)); params = script.getParams(); assertThat(params, notNullValue()); assertThat(params.size(), equalTo(1)); @@ -124,7 +124,7 @@ public class UpdateRequestTests extends ESTestCase { assertThat(script, notNullValue()); assertThat(script.getScript(), equalTo("script1")); assertThat(script.getType(), equalTo(ScriptType.INLINE)); - assertThat(script.getLang(), nullValue()); + assertThat(script.getLang(), equalTo(Script.DEFAULT_SCRIPT_LANG)); params = script.getParams(); assertThat(params, notNullValue()); assertThat(params.size(), equalTo(1)); diff --git a/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java b/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java index eb6b27b086c..ebca512baec 100644 --- a/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java +++ b/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java @@ -807,7 +807,7 @@ public class IndexAliasesIT extends ESIntegTestCase { final int numDocs = scaledRandomIntBetween(5, 52); for (int i = 1; i <= numDocs; i++) { - client().prepareIndex("my-index", "my-type").setCreate(true).setSource("timestamp", "2016-12-12").get(); + client().prepareIndex("my-index", "my-type").setSource("timestamp", "2016-12-12").get(); if (i % 2 == 0) { refresh(); SearchResponse response = client().prepareSearch("filter1").get(); diff --git a/core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java index cbbe05f6e74..ab305c65cbb 100644 --- a/core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java +++ b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java @@ -19,8 +19,8 @@ package org.elasticsearch.bootstrap; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.Constants; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; @@ -67,13 +67,13 @@ public class BootstrapCheckTests extends ESTestCase { } public void testNoLogMessageInNonProductionMode() { - final ESLogger logger = mock(ESLogger.class); + final Logger logger = mock(Logger.class); BootstrapCheck.check(false, randomBoolean(), Collections.emptyList(), logger); verifyNoMoreInteractions(logger); } public void testLogMessageInProductionMode() { - final ESLogger logger = mock(ESLogger.class); + final Logger logger = mock(Logger.class); final boolean ignoreSystemChecks = randomBoolean(); BootstrapCheck.check(true, ignoreSystemChecks, Collections.emptyList(), logger); verify(logger).info("bound or publishing to a non-loopback or non-link-local address, enforcing bootstrap checks"); @@ -550,7 +550,7 @@ public class BootstrapCheckTests extends ESTestCase { () -> BootstrapCheck.check(true, false, Collections.singletonList(check), "testIgnoringSystemChecks")); assertThat(notIgnored, hasToString(containsString("error"))); - final ESLogger logger = mock(ESLogger.class); + final Logger logger = mock(Logger.class); // nothing should happen if we ignore system checks BootstrapCheck.check(true, true, Collections.singletonList(check), logger); diff --git a/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java b/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java index bb3ef29176e..467608922d1 100644 --- 
diff --git a/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java b/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java
index eb6b27b086c..ebca512baec 100644
--- a/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java
+++ b/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java
@@ -807,7 +807,7 @@ public class IndexAliasesIT extends ESIntegTestCase {
         final int numDocs = scaledRandomIntBetween(5, 52);
         for (int i = 1; i <= numDocs; i++) {
-            client().prepareIndex("my-index", "my-type").setCreate(true).setSource("timestamp", "2016-12-12").get();
+            client().prepareIndex("my-index", "my-type").setSource("timestamp", "2016-12-12").get();
             if (i % 2 == 0) {
                 refresh();
                 SearchResponse response = client().prepareSearch("filter1").get();
diff --git a/core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java
index cbbe05f6e74..ab305c65cbb 100644
--- a/core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java
+++ b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java
@@ -19,8 +19,8 @@
 package org.elasticsearch.bootstrap;
 
+import org.apache.logging.log4j.Logger;
 import org.apache.lucene.util.Constants;
-import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.BoundTransportAddress;
 import org.elasticsearch.common.transport.TransportAddress;
@@ -67,13 +67,13 @@ public class BootstrapCheckTests extends ESTestCase {
     }
 
     public void testNoLogMessageInNonProductionMode() {
-        final ESLogger logger = mock(ESLogger.class);
+        final Logger logger = mock(Logger.class);
         BootstrapCheck.check(false, randomBoolean(), Collections.emptyList(), logger);
         verifyNoMoreInteractions(logger);
     }
 
     public void testLogMessageInProductionMode() {
-        final ESLogger logger = mock(ESLogger.class);
+        final Logger logger = mock(Logger.class);
         final boolean ignoreSystemChecks = randomBoolean();
         BootstrapCheck.check(true, ignoreSystemChecks, Collections.emptyList(), logger);
         verify(logger).info("bound or publishing to a non-loopback or non-link-local address, enforcing bootstrap checks");
@@ -550,7 +550,7 @@ public class BootstrapCheckTests extends ESTestCase {
                 () -> BootstrapCheck.check(true, false, Collections.singletonList(check), "testIgnoringSystemChecks"));
         assertThat(notIgnored, hasToString(containsString("error")));
 
-        final ESLogger logger = mock(ESLogger.class);
+        final Logger logger = mock(Logger.class);
 
         // nothing should happen if we ignore system checks
         BootstrapCheck.check(true, true, Collections.singletonList(check), logger);
diff --git a/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java b/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java
index bb3ef29176e..467608922d1 100644
--- a/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java
+++ b/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java
@@ -111,15 +111,9 @@ public class JarHellTests extends ESTestCase {
         }
     }
 
-    public void testLog4jLeniency() throws Exception {
+    public void testLog4jThrowableProxyLeniency() throws Exception {
         Path dir = createTempDir();
-        URL[] jars = {makeJar(dir, "foo.jar", null, "org/apache/log4j/DuplicateClass.class"), makeJar(dir, "bar.jar", null, "org/apache/log4j/DuplicateClass.class")};
-        JarHell.checkJarHell(jars);
-    }
-
-    public void testBaseDateTimeLeniency() throws Exception {
-        Path dir = createTempDir();
-        URL[] jars = {makeJar(dir, "foo.jar", null, "org/joda/time/base/BaseDateTime.class"), makeJar(dir, "bar.jar", null, "org/joda/time/base/BaseDateTime.class")};
+        URL[] jars = {makeJar(dir, "foo.jar", null, "org.apache.logging.log4j.core.impl.ThrowableProxy.class"), makeJar(dir, "bar.jar", null, "org.apache.logging.log4j.core.impl.ThrowableProxy.class")};
         JarHell.checkJarHell(jars);
     }
 
diff --git a/core/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java b/core/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java
index b48fcc78c6c..5a2b8a5a143 100644
--- a/core/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java
+++ b/core/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java
@@ -19,20 +19,25 @@
 package org.elasticsearch.bootstrap;
 
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.core.LogEvent;
+import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.apache.lucene.util.Constants;
-import org.elasticsearch.common.SuppressLoggerChecks;
 import org.elasticsearch.common.io.PathUtils;
-import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.ESLoggerFactory;
+import org.elasticsearch.common.logging.TestLoggers;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.MockLogAppender;
 
 import java.io.BufferedReader;
 import java.io.IOException;
 import java.nio.file.Path;
+import java.util.Arrays;
+import java.util.function.Predicate;
 
 import static org.hamcrest.CoreMatchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.eq;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.reset;
 import static org.mockito.Mockito.verify;
@@ -47,8 +52,7 @@ public class MaxMapCountCheckTests extends ESTestCase {
         }
     }
 
-    @SuppressLoggerChecks(reason = "mock usage")
-    public void testGetMaxMapCount() throws IOException {
+    public void testGetMaxMapCount() throws IOException, IllegalAccessException {
         final long procSysVmMaxMapCount = randomIntBetween(1, Integer.MAX_VALUE);
         final BufferedReader reader = mock(BufferedReader.class);
         when(reader.readLine()).thenReturn(Long.toString(procSysVmMaxMapCount));
@@ -64,20 +68,92 @@
         assertThat(check.getMaxMapCount(), equalTo(procSysVmMaxMapCount));
         verify(reader).close();
 
-        reset(reader);
-        final IOException ioException = new IOException("fatal");
-        when(reader.readLine()).thenThrow(ioException);
-        final ESLogger logger = mock(ESLogger.class);
-        assertThat(check.getMaxMapCount(logger), equalTo(-1L));
-        verify(logger).warn("I/O exception while trying to read [{}]", ioException, procSysVmMaxMapCountPath);
-        verify(reader).close();
+        {
+            reset(reader);
+            final IOException ioException = new IOException("fatal");
+            when(reader.readLine()).thenThrow(ioException);
+            final Logger logger = ESLoggerFactory.getLogger("testGetMaxMapCountIOException");
+            final MockLogAppender appender = new MockLogAppender();
+            appender.addExpectation(
+                new ParameterizedMessageLoggingExpectation(
+                    "expected logged I/O exception",
+                    "testGetMaxMapCountIOException",
+                    Level.WARN,
+                    "I/O exception while trying to read [{}]",
+                    new Object[] { procSysVmMaxMapCountPath },
+                    e -> ioException == e));
+            TestLoggers.addAppender(logger, appender);
+            assertThat(check.getMaxMapCount(logger), equalTo(-1L));
+            appender.assertAllExpectationsMatched();
+            verify(reader).close();
+            TestLoggers.removeAppender(logger, appender);
+        }
+
+        {
+            reset(reader);
+            when(reader.readLine()).thenReturn("eof");
+            final Logger logger = ESLoggerFactory.getLogger("testGetMaxMapCountNumberFormatException");
+            final MockLogAppender appender = new MockLogAppender();
+            appender.addExpectation(
+                new ParameterizedMessageLoggingExpectation(
+                    "expected logged number format exception",
+                    "testGetMaxMapCountNumberFormatException",
+                    Level.WARN,
+                    "unable to parse vm.max_map_count [{}]",
+                    new Object[] { "eof" },
+                    e -> e instanceof NumberFormatException && e.getMessage().equals("For input string: \"eof\"")));
+            TestLoggers.addAppender(logger, appender);
+            assertThat(check.getMaxMapCount(logger), equalTo(-1L));
+            appender.assertAllExpectationsMatched();
+            verify(reader).close();
+            TestLoggers.removeAppender(logger, appender);
+        }
+
+    }
+
+    private static class ParameterizedMessageLoggingExpectation implements MockLogAppender.LoggingExpectation {
+
+        private boolean saw = false;
+
+        private final String name;
+        private final String loggerName;
+        private final Level level;
+        private final String messagePattern;
+        private final Object[] arguments;
+        private final Predicate throwablePredicate;
+
+        private ParameterizedMessageLoggingExpectation(
+            final String name,
+            final String loggerName,
+            final Level level,
+            final String messagePattern,
+            final Object[] arguments,
+            final Predicate throwablePredicate) {
+            this.name = name;
+            this.loggerName = loggerName;
+            this.level = level;
+            this.messagePattern = messagePattern;
+            this.arguments = arguments;
+            this.throwablePredicate = throwablePredicate;
+        }
+
+        @Override
+        public void match(LogEvent event) {
+            if (event.getLevel().equals(level) &&
+                event.getLoggerName().equals(loggerName) &&
+                event.getMessage() instanceof ParameterizedMessage) {
+                final ParameterizedMessage message = (ParameterizedMessage)event.getMessage();
+                saw = message.getFormat().equals(messagePattern) &&
+                    Arrays.deepEquals(arguments, message.getParameters()) &&
+                    throwablePredicate.test(event.getThrown());
+            }
+        }
+
+        @Override
+        public void assertMatched() {
+            assertTrue(name, saw);
+        }
 
-        reset(reader);
-        reset(logger);
-        when(reader.readLine()).thenReturn("eof");
-        assertThat(check.getMaxMapCount(logger), equalTo(-1L));
-        verify(logger).warn(eq("unable to parse vm.max_map_count [{}]"), any(NumberFormatException.class), eq("eof"));
-        verify(reader).close();
     }
 
     public void testMaxMapCountCheckRead() throws IOException {
diff --git a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java
index 45c89062a5a..3d35c42adcf 100644
--- a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java
+++ b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java
@@ -103,8 +103,8 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
 
     @Before
     public void initIndexesList() throws Exception {
-        indexes = OldIndexUtils.loadIndexesList("index", getBwcIndicesPath());
-        unsupportedIndexes = OldIndexUtils.loadIndexesList("unsupported", getBwcIndicesPath());
+        indexes = OldIndexUtils.loadDataFilesList("index", getBwcIndicesPath());
+        unsupportedIndexes = OldIndexUtils.loadDataFilesList("unsupported", getBwcIndicesPath());
     }
 
     @AfterClass
diff --git a/core/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java b/core/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java
index 9c99b0f8c65..98c7b1a3d67 100644
--- a/core/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java
+++ b/core/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java
@@ -20,6 +20,7 @@ package org.elasticsearch.cluster.allocation;
 
 import com.carrotsearch.hppc.ObjectIntHashMap;
+import org.apache.logging.log4j.Logger;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.routing.IndexRoutingTable;
@@ -27,7 +28,6 @@ import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider;
 import org.elasticsearch.common.Priority;
-import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.discovery.zen.ZenDiscovery;
@@ -46,7 +46,7 @@ import static org.hamcrest.Matchers.equalTo;
 
 @ClusterScope(scope= ESIntegTestCase.Scope.TEST, numDataNodes =0, minNumDataNodes = 2)
 public class AwarenessAllocationIT extends ESIntegTestCase {
-    private final ESLogger logger = Loggers.getLogger(AwarenessAllocationIT.class);
+    private final Logger logger = Loggers.getLogger(AwarenessAllocationIT.class);
 
     @Override
     protected int numberOfReplicas() {
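From this point the patch repeats one mechanical substitution across the allocation test classes: the logger field's type changes from the ESLogger facade to the Log4j 2 Logger, while the Loggers.getLogger call site stays the same. The shape of the change in one hypothetical test class (SomeTests is a placeholder; Loggers comes from the Elasticsearch common logging package):

// Before (Elasticsearch logging facade):
//   import org.elasticsearch.common.logging.ESLogger;
//   private final ESLogger logger = Loggers.getLogger(SomeTests.class);

// After: the field is typed directly against the Log4j 2 API.
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.Loggers;

public class SomeTests { // hypothetical class name
    private final Logger logger = Loggers.getLogger(SomeTests.class);
}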
Loggers.getLogger(ClusterRerouteIT.class); public void testRerouteWithCommands_disableAllocationSettings() throws Exception { Settings commonSettings = Settings.builder() diff --git a/core/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java b/core/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java index 4cc04086421..627fc03701c 100644 --- a/core/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java @@ -19,12 +19,12 @@ package org.elasticsearch.cluster.allocation; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilders; @@ -39,7 +39,7 @@ import static org.hamcrest.Matchers.equalTo; @ClusterScope(scope= Scope.TEST, numDataNodes =0) public class FilteringAllocationIT extends ESIntegTestCase { - private final ESLogger logger = Loggers.getLogger(FilteringAllocationIT.class); + private final Logger logger = Loggers.getLogger(FilteringAllocationIT.class); public void testDecommissionNodeNoReplicas() throws Exception { logger.info("--> starting 2 nodes"); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java index 7971b2773b6..3803f54ba76 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.cluster.routing.allocation; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -30,7 +31,6 @@ import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; @@ -45,7 +45,7 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; public class AddIncrementallyTests extends ESAllocationTestCase { - private final ESLogger logger = Loggers.getLogger(AddIncrementallyTests.class); + private final Logger logger = Loggers.getLogger(AddIncrementallyTests.class); public void testAddNodesAndIndices() { Settings.Builder settings = Settings.builder(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java index 
8c764ed2ae9..24faf9c45ae 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -41,7 +42,6 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; @@ -66,7 +66,7 @@ import static org.hamcrest.Matchers.nullValue; /** */ public class AllocationCommandsTests extends ESAllocationTestCase { - private final ESLogger logger = Loggers.getLogger(AllocationCommandsTests.class); + private final Logger logger = Loggers.getLogger(AllocationCommandsTests.class); public void testMoveShardCommand() { AllocationService allocation = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java index bf71fa766ed..f85cc97560e 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -31,7 +32,6 @@ import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands; import org.elasticsearch.cluster.routing.allocation.command.CancelAllocationCommand; import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.cluster.ESAllocationTestCase; @@ -49,7 +49,7 @@ import static org.hamcrest.Matchers.sameInstance; */ public class AwarenessAllocationTests extends ESAllocationTestCase { - private final ESLogger logger = Loggers.getLogger(AwarenessAllocationTests.class); + private final Logger logger = Loggers.getLogger(AwarenessAllocationTests.class); public void testMoveShardOnceNewNodeWithAttributeAdded1() { AllocationService strategy = createAllocationService(Settings.builder() diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java index f45567d280a..b426220614e 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java +++ 
b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.cluster.routing.allocation; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.ArrayUtil; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; @@ -36,7 +37,6 @@ import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -52,7 +52,7 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; public class BalanceConfigurationTests extends ESAllocationTestCase { - private final ESLogger logger = Loggers.getLogger(BalanceConfigurationTests.class); + private final Logger logger = Loggers.getLogger(BalanceConfigurationTests.class); // TODO maybe we can randomize these numbers somehow final int numberOfNodes = 25; final int numberOfIndices = 12; @@ -198,7 +198,7 @@ public class BalanceConfigurationTests extends ESAllocationTestCase { } - private void assertReplicaBalance(ESLogger logger, RoutingNodes nodes, int numberOfNodes, int numberOfIndices, int numberOfReplicas, int numberOfShards, float treshold) { + private void assertReplicaBalance(Logger logger, RoutingNodes nodes, int numberOfNodes, int numberOfIndices, int numberOfReplicas, int numberOfShards, float treshold) { final int numShards = numberOfIndices * numberOfShards * (numberOfReplicas + 1); final float avgNumShards = (float) (numShards) / (float) (numberOfNodes); final int minAvgNumberOfShards = Math.round(Math.round(Math.floor(avgNumShards - treshold))); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java index 9346bebd172..7d7db4ef216 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -29,7 +30,6 @@ import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.cluster.ESAllocationTestCase; @@ -45,7 +45,7 @@ import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { - private final ESLogger logger = Loggers.getLogger(ClusterRebalanceRoutingTests.class); + private final Logger 
logger = Loggers.getLogger(ClusterRebalanceRoutingTests.class); public void testAlways() { AllocationService strategy = createAllocationService(Settings.builder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java index 36565e1f85e..d7b56c6508e 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -26,7 +27,6 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.cluster.ESAllocationTestCase; @@ -39,7 +39,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; public class ConcurrentRebalanceRoutingTests extends ESAllocationTestCase { - private final ESLogger logger = Loggers.getLogger(ConcurrentRebalanceRoutingTests.class); + private final Logger logger = Loggers.getLogger(ConcurrentRebalanceRoutingTests.class); public void testClusterConcurrentRebalance() { AllocationService strategy = createAllocationService(Settings.builder() diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java index abc69cbf91c..c4d8545e97c 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -28,7 +29,6 @@ import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands; import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.cluster.ESAllocationTestCase; @@ -41,7 +41,7 @@ import static org.hamcrest.Matchers.equalTo; /** */ public class DeadNodesAllocationTests extends ESAllocationTestCase { - private final ESLogger logger = Loggers.getLogger(DeadNodesAllocationTests.class); + private final Logger logger = Loggers.getLogger(DeadNodesAllocationTests.class); public void testSimpleDeadNodeOnStartedPrimaryShard() { AllocationService allocation = 
createAllocationService(Settings.builder() diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java index d840e5ef418..5707a229da3 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -27,7 +28,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.cluster.ESAllocationTestCase; @@ -40,7 +40,7 @@ import static org.hamcrest.Matchers.equalTo; * */ public class ElectReplicaAsPrimaryDuringRelocationTests extends ESAllocationTestCase { - private final ESLogger logger = Loggers.getLogger(ElectReplicaAsPrimaryDuringRelocationTests.class); + private final Logger logger = Loggers.getLogger(ElectReplicaAsPrimaryDuringRelocationTests.class); public void testElectReplicaAsPrimaryDuringRelocation() { AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java index 15f08a9723c..d76b666549e 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.ClusterInfoService; @@ -32,7 +33,6 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands; import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.cluster.ESAllocationTestCase; @@ -43,7 +43,7 @@ import static org.hamcrest.Matchers.equalTo; /** */ public class ExpectedShardSizeAllocationTests extends ESAllocationTestCase { - private final ESLogger logger = Loggers.getLogger(ExpectedShardSizeAllocationTests.class); + private final Logger logger = Loggers.getLogger(ExpectedShardSizeAllocationTests.class); public void testInitializingHasExpectedSize() { final long byteSize = randomIntBetween(0, Integer.MAX_VALUE); diff --git 
a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java index 811c57e4200..6dd5c3a7b30 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -28,7 +29,6 @@ import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.cluster.ESAllocationTestCase; @@ -38,7 +38,7 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; import static org.hamcrest.Matchers.equalTo; public class FailedNodeRoutingTests extends ESAllocationTestCase { - private final ESLogger logger = Loggers.getLogger(FailedNodeRoutingTests.class); + private final Logger logger = Loggers.getLogger(FailedNodeRoutingTests.class); public void testSimpleFailedNodeTest() { AllocationService strategy = createAllocationService(Settings.builder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java index 8c7043a7439..852fbd1944b 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -30,7 +31,6 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands; import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.cluster.ESAllocationTestCase; @@ -54,7 +54,7 @@ import static org.hamcrest.Matchers.nullValue; * */ public class FailedShardsRoutingTests extends ESAllocationTestCase { - private final ESLogger logger = Loggers.getLogger(FailedShardsRoutingTests.class); + private final Logger logger = Loggers.getLogger(FailedShardsRoutingTests.class); public void testFailedShardPrimaryRelocatingToAndFrom() { AllocationService allocation = createAllocationService(Settings.builder() diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java 
b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java index 16b918373a1..6d0542b95cd 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -28,7 +29,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.cluster.ESAllocationTestCase; @@ -44,7 +44,7 @@ import static org.hamcrest.Matchers.equalTo; /** */ public class FilterRoutingTests extends ESAllocationTestCase { - private final ESLogger logger = Loggers.getLogger(FilterRoutingTests.class); + private final Logger logger = Loggers.getLogger(FilterRoutingTests.class); public void testClusterFilters() { AllocationService strategy = createAllocationService(Settings.builder() diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java index 311a1825c1d..cd35669f048 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -27,7 +28,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.cluster.ESAllocationTestCase; @@ -42,7 +42,7 @@ import static org.hamcrest.Matchers.nullValue; * */ public class IndexBalanceTests extends ESAllocationTestCase { - private final ESLogger logger = Loggers.getLogger(IndexBalanceTests.class); + private final Logger logger = Loggers.getLogger(IndexBalanceTests.class); public void testBalanceAllNodesStarted() { AllocationService strategy = createAllocationService(Settings.builder() diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java index 2e8679ef81c..eab7d469dad 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.Logger; 
import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -44,7 +45,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllo import org.elasticsearch.cluster.routing.allocation.decider.NodeVersionAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider; import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.LocalTransportAddress; @@ -76,7 +76,7 @@ import static org.hamcrest.Matchers.nullValue; * */ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { - private final ESLogger logger = Loggers.getLogger(NodeVersionAllocationDeciderTests.class); + private final Logger logger = Loggers.getLogger(NodeVersionAllocationDeciderTests.class); public void testDoNotAllocateFromPrimary() { AllocationService strategy = createAllocationService(Settings.builder() diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java index 17e40119525..7badada5d58 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -26,7 +27,6 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.cluster.ESAllocationTestCase; @@ -37,7 +37,7 @@ import static org.hamcrest.Matchers.equalTo; /** */ public class PreferPrimaryAllocationTests extends ESAllocationTestCase { - private final ESLogger logger = Loggers.getLogger(PreferPrimaryAllocationTests.class); + private final Logger logger = Loggers.getLogger(PreferPrimaryAllocationTests.class); public void testPreferPrimaryAllocationOverReplicas() { logger.info("create an allocation with 1 initial recoveries"); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java index 2d07e60f140..ccdc4deb027 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -26,7 +27,6 @@ import org.elasticsearch.cluster.metadata.MetaData; import 
org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.cluster.ESAllocationTestCase; @@ -40,7 +40,7 @@ import static org.hamcrest.Matchers.nullValue; * */ public class PrimaryElectionRoutingTests extends ESAllocationTestCase { - private final ESLogger logger = Loggers.getLogger(PrimaryElectionRoutingTests.class); + private final Logger logger = Loggers.getLogger(PrimaryElectionRoutingTests.class); public void testBackupElectionToPrimaryWhenPrimaryCanBeAllocatedToAnotherNode() { AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java index a2dab942e3e..c1d4a168e86 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -26,7 +27,6 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.cluster.ESAllocationTestCase; @@ -39,7 +39,7 @@ import static org.hamcrest.Matchers.equalTo; * */ public class PrimaryNotRelocatedWhileBeingRecoveredTests extends ESAllocationTestCase { - private final ESLogger logger = Loggers.getLogger(PrimaryNotRelocatedWhileBeingRecoveredTests.class); + private final Logger logger = Loggers.getLogger(PrimaryNotRelocatedWhileBeingRecoveredTests.class); public void testPrimaryNotRelocatedWhileBeingRecoveredFrom() { AllocationService strategy = createAllocationService(Settings.builder() diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java index f1c4d99d87f..a6929b651eb 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.ClusterInfoService; @@ -31,7 +32,6 @@ import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import 
org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.cluster.ESAllocationTestCase; @@ -47,7 +47,7 @@ import static org.hamcrest.Matchers.nullValue; * */ public class RebalanceAfterActiveTests extends ESAllocationTestCase { - private final ESLogger logger = Loggers.getLogger(RebalanceAfterActiveTests.class); + private final Logger logger = Loggers.getLogger(RebalanceAfterActiveTests.class); public void testRebalanceOnlyAfterAllShardsAreActive() { final long[] sizes = new long[5]; diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java index d7ad0972b12..cf9db4ec542 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -26,7 +27,6 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.cluster.ESAllocationTestCase; @@ -42,7 +42,7 @@ import static org.hamcrest.Matchers.nullValue; * */ public class ReplicaAllocatedAfterPrimaryTests extends ESAllocationTestCase { - private final ESLogger logger = Loggers.getLogger(ReplicaAllocatedAfterPrimaryTests.class); + private final Logger logger = Loggers.getLogger(ReplicaAllocatedAfterPrimaryTests.class); public void testBackupIsAllocatedAfterPrimary() { AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java index dbcd035ad79..c5998b0b73a 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -29,7 +30,6 @@ import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.cluster.ESAllocationTestCase; @@ -42,7 +42,7 @@ import static org.hamcrest.Matchers.equalTo; * */ public class RoutingNodesIntegrityTests extends ESAllocationTestCase { - private final ESLogger logger = Loggers.getLogger(IndexBalanceTests.class); + private final Logger logger = Loggers.getLogger(IndexBalanceTests.class); public void testBalanceAllNodesStarted() { AllocationService strategy = createAllocationService(Settings.builder() diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java index 908ec3a4236..69c2efe9b0b 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -30,7 +31,6 @@ import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.LocalTransportAddress; @@ -44,7 +44,7 @@ import static org.hamcrest.Matchers.equalTo; /** */ public class SameShardRoutingTests extends ESAllocationTestCase { - private final ESLogger logger = Loggers.getLogger(SameShardRoutingTests.class); + private final Logger logger = Loggers.getLogger(SameShardRoutingTests.class); public void testSameHost() { AllocationService strategy = createAllocationService( diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java index de3189956aa..534e2af5a89 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -27,7 +28,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.cluster.ESAllocationTestCase; @@ -38,7 +38,7 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED; import static org.hamcrest.Matchers.equalTo; public class ShardVersioningTests extends ESAllocationTestCase { - private final ESLogger logger = Loggers.getLogger(ShardVersioningTests.class); + private final Logger logger = 
Loggers.getLogger(ShardVersioningTests.class); public void testSimple() { AllocationService strategy = createAllocationService(Settings.builder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java index b01fb9bcc6e..930c099f956 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -29,7 +30,6 @@ import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.cluster.ESAllocationTestCase; @@ -43,7 +43,7 @@ import static org.hamcrest.Matchers.equalTo; /** */ public class ShardsLimitAllocationTests extends ESAllocationTestCase { - private final ESLogger logger = Loggers.getLogger(ShardsLimitAllocationTests.class); + private final Logger logger = Loggers.getLogger(ShardsLimitAllocationTests.class); public void testIndexLevelShardsLimitAllocate() { AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java index 0a1bf218b59..c30ceef08b9 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -30,7 +31,6 @@ import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.cluster.ESAllocationTestCase; @@ -56,7 +56,7 @@ import static org.hamcrest.Matchers.nullValue; * */ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase { - private final ESLogger logger = Loggers.getLogger(SingleShardNoReplicasRoutingTests.class); + private final Logger logger = Loggers.getLogger(SingleShardNoReplicasRoutingTests.class); public void testSingleIndexStartedShard() { AllocationService 
strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java index 95d36815293..cad5654eaa4 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -26,7 +27,6 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.cluster.ESAllocationTestCase; @@ -41,7 +41,7 @@ import static org.hamcrest.Matchers.nullValue; * */ public class SingleShardOneReplicaRoutingTests extends ESAllocationTestCase { - private final ESLogger logger = Loggers.getLogger(SingleShardOneReplicaRoutingTests.class); + private final Logger logger = Loggers.getLogger(SingleShardOneReplicaRoutingTests.class); public void testSingleIndexFirstStartPrimaryThenBackups() { AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java index f629e627671..a6a7e6c0651 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -27,7 +28,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.cluster.ESAllocationTestCase; @@ -44,7 +44,7 @@ import static org.hamcrest.Matchers.nullValue; * */ public class TenShardsOneReplicaRoutingTests extends ESAllocationTestCase { - private final ESLogger logger = Loggers.getLogger(TenShardsOneReplicaRoutingTests.class); + private final Logger logger = Loggers.getLogger(TenShardsOneReplicaRoutingTests.class); public void testSingleIndexFirstStartPrimaryThenBackups() { AllocationService strategy = createAllocationService(Settings.builder() diff --git 
a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java index 497d2e0e265..9083085b5c7 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.cluster.routing.allocation; import com.carrotsearch.hppc.IntHashSet; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -30,7 +31,6 @@ import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands; import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.elasticsearch.cluster.routing.allocation.decider.Decision; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.snapshots.Snapshot; @@ -47,7 +47,7 @@ import static org.hamcrest.Matchers.equalTo; * */ public class ThrottlingAllocationTests extends ESAllocationTestCase { - private final ESLogger logger = Loggers.getLogger(ThrottlingAllocationTests.class); + private final Logger logger = Loggers.getLogger(ThrottlingAllocationTests.class); public void testPrimaryRecoveryThrottling() { AllocationService strategy = createAllocationService(Settings.builder() diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java index 9486c4c3fcc..4857086e24b 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -26,7 +27,6 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.cluster.ESAllocationTestCase; @@ -43,7 +43,7 @@ import static org.hamcrest.Matchers.nullValue; * */ public class UpdateNumberOfReplicasTests extends ESAllocationTestCase { - private final ESLogger logger = Loggers.getLogger(UpdateNumberOfReplicasTests.class); + private final Logger logger = Loggers.getLogger(UpdateNumberOfReplicasTests.class); public void testUpdateNumberOfReplicas() { AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java index 
75c33c44cf1..98268e409fc 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java @@ -56,6 +56,7 @@ import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; import static java.util.Collections.emptyMap; import static java.util.Collections.singleton; @@ -729,10 +730,9 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { ImmutableOpenMap shardSizes = shardSizesBuilder.build(); final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes); + DiskThresholdDecider decider = makeDecider(diskSettings); AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY, - new HashSet<>(Arrays.asList( - new SameShardAllocationDecider(Settings.EMPTY), - makeDecider(diskSettings)))); + new HashSet<>(Arrays.asList(new SameShardAllocationDecider(Settings.EMPTY), decider))); ClusterInfoService cis = new ClusterInfoService() { @Override @@ -832,6 +832,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { ImmutableOpenMap.Builder shardSizesBuilder = ImmutableOpenMap.builder(); shardSizesBuilder.put("[test][0][p]", 40L); shardSizesBuilder.put("[test][1][p]", 40L); + shardSizesBuilder.put("[foo][0][p]", 10L); ImmutableOpenMap shardSizes = shardSizesBuilder.build(); final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes); @@ -839,10 +840,12 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { DiskThresholdDecider diskThresholdDecider = makeDecider(diskSettings); MetaData metaData = MetaData.builder() .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(0)) + .put(IndexMetaData.builder("foo").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0)) .build(); RoutingTable initialRoutingTable = RoutingTable.builder() .addAsNew(metaData.index("test")) + .addAsNew(metaData.index("foo")) .build(); DiscoveryNode discoveryNode1 = new DiscoveryNode("node1", new LocalTransportAddress("1"), emptyMap(), @@ -881,6 +884,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { // Two shards consuming each 80% of disk space while 70% is allowed, but one is relocating, so shard 0 can stay firstRouting = TestShardRouting.newShardRouting("test", 0, "node1", null, true, ShardRoutingState.STARTED); secondRouting = TestShardRouting.newShardRouting("test", 1, "node1", "node2", true, ShardRoutingState.RELOCATING); + ShardRouting fooRouting = TestShardRouting.newShardRouting("foo", 0, "node1", null, true, ShardRoutingState.UNASSIGNED); firstRoutingNode = new RoutingNode("node1", discoveryNode1, firstRouting, secondRouting); builder = RoutingTable.builder().add( IndexRoutingTable.builder(firstRouting.index()) @@ -898,6 +902,8 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { false); decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation); assertThat(decision.type(), equalTo(Decision.Type.YES)); + decision = diskThresholdDecider.canAllocate(fooRouting, firstRoutingNode, routingAllocation); + assertThat(decision.type(), equalTo(Decision.Type.NO)); // Creating AllocationService instance and the services it depends on... 
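The new "foo" index adds a third, deliberately unassigned shard to pin down the asymmetry between the two decider checks: canRemain answers YES for shard 0 because shard 1 is relocating away and will free disk, while canAllocate must still answer NO for a fresh shard on a node that is currently over the watermark. Condensed from the assertions above (all objects as defined in the test):

    // a shard already on node1 may stay: the in-flight relocation will free space
    assertThat(diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation).type(),
            equalTo(Decision.Type.YES));
    // but a new, unassigned shard must not land on the same over-watermark node
    assertThat(diskThresholdDecider.canAllocate(fooRouting, firstRoutingNode, routingAllocation).type(),
            equalTo(Decision.Type.NO));
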
ClusterInfoService cis = new ClusterInfoService() { diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java index 0e0ba4cea0b..a9c15de61dc 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.cluster.routing.allocation.decider; import com.carrotsearch.randomizedtesting.generators.RandomPicks; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -31,7 +32,6 @@ import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.Allocation; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.Rebalance; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -52,7 +52,7 @@ import static org.hamcrest.Matchers.equalTo; */ public class EnableAllocationTests extends ESAllocationTestCase { - private final ESLogger logger = Loggers.getLogger(EnableAllocationTests.class); + private final Logger logger = Loggers.getLogger(EnableAllocationTests.class); public void testClusterEnableNone() { AllocationService strategy = createAllocationService(Settings.builder() diff --git a/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java index b7eb532e104..cf80e8378ab 100644 --- a/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.cluster.service; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; @@ -125,7 +127,7 @@ public class ClusterServiceIT extends ESIntegTestCase { @Override public void onFailure(String source, Exception e) { - logger.error("failed to execute callback in test {}", e, source); + logger.error((Supplier) () -> new ParameterizedMessage("failed to execute callback in test {}", source), e); onFailure.set(true); latch.countDown(); } @@ -196,7 +198,7 @@ public class ClusterServiceIT extends ESIntegTestCase { @Override public void onFailure(String source, Exception e) { - logger.error("failed to execute callback in test {}", e, source); + logger.error((Supplier) () -> new ParameterizedMessage("failed to execute callback in test {}", source), e); onFailure.set(true); latch.countDown(); } @@ -270,7 +272,7 @@ public class ClusterServiceIT extends ESIntegTestCase { @Override public void onFailure(String source, Exception e) { - logger.error("failed to execute callback in test {}", e, source); + logger.error((Supplier) () -> new ParameterizedMessage("failed to 
execute callback in test {}", source), e); onFailure.set(true); latch.countDown(); } @@ -344,7 +346,7 @@ public class ClusterServiceIT extends ESIntegTestCase { @Override public void onFailure(String source, Exception e) { - logger.error("failed to execute callback in test {}", e, source); + logger.error((Supplier) () -> new ParameterizedMessage("failed to execute callback in test {}", source), e); onFailure.set(true); latch.countDown(); } diff --git a/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java index 1002774d2ca..c4c09275e80 100644 --- a/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java @@ -18,8 +18,11 @@ */ package org.elasticsearch.cluster.service; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -35,6 +38,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Priority; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.logging.TestLoggers; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.LocalTransportAddress; @@ -69,9 +73,7 @@ import java.util.stream.Collectors; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static org.elasticsearch.test.ClusterServiceUtils.setState; -import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.anyOf; -import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -331,7 +333,7 @@ public class ClusterServiceTests extends ESTestCase { ClusterStateTaskListener listener = new ClusterStateTaskListener() { @Override public void onFailure(String source, Exception e) { - logger.error("unexpected failure: [{}]", e, source); + logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure: [{}]", source), e); failures.add(new Tuple<>(source, e)); updateLatch.countDown(); } @@ -685,8 +687,8 @@ public class ClusterServiceTests extends ESTestCase { mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test3", "cluster.service", Level.DEBUG, "*processing [test3]: took [3s] done applying updated cluster_state (version: *, uuid: *)")); - Logger rootLogger = Logger.getRootLogger(); - rootLogger.addAppender(mockAppender); + Logger rootLogger = LogManager.getRootLogger(); + TestLoggers.addAppender(rootLogger, mockAppender); try { final CountDownLatch latch = new CountDownLatch(4); clusterService.currentTimeOverride = System.nanoTime(); @@ -761,7 +763,7 @@ public class ClusterServiceTests extends ESTestCase { }); latch.await(); } finally { - rootLogger.removeAppender(mockAppender); + TestLoggers.removeAppender(rootLogger, mockAppender); } mockAppender.assertAllExpectationsMatched(); } @@ -778,8 +780,8 @@ public class ClusterServiceTests 
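The repeated onFailure rewrites in ClusterServiceIT and ClusterServiceTests all follow the same recipe. ESLogger offered error(String, Throwable, Object...) overloads; log4j2's Logger does not, so the formatted message is wrapped in a lazily built ParameterizedMessage behind a Supplier, with the exception passed as the final argument; the message string is then only rendered if ERROR is actually enabled. The cast appears necessary because a bare lambda would match both the error(Supplier<?>, Throwable) and error(MessageSupplier, Throwable) overloads; the hunks above use the raw (Supplier) form, which also resolves the ambiguity but compiles with an unchecked warning:

    import org.apache.logging.log4j.message.ParameterizedMessage;
    import org.apache.logging.log4j.util.Supplier;

    // as in the hunks above; `source` and `e` come from the onFailure callback
    logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to execute callback in test {}", source), e);
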
extends ESTestCase { mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test4", "cluster.service", Level.WARN, "*cluster state update task [test4] took [34s] above the warn threshold of *")); - Logger rootLogger = Logger.getRootLogger(); - rootLogger.addAppender(mockAppender); + Logger rootLogger = LogManager.getRootLogger(); + TestLoggers.addAppender(rootLogger, mockAppender); try { final CountDownLatch latch = new CountDownLatch(5); final CountDownLatch processedFirstTask = new CountDownLatch(1); @@ -875,7 +877,7 @@ public class ClusterServiceTests extends ESTestCase { }); latch.await(); } finally { - rootLogger.removeAppender(mockAppender); + TestLoggers.removeAppender(rootLogger, mockAppender); } mockAppender.assertAllExpectationsMatched(); } diff --git a/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java b/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java index 7f423d1bb9e..26bb97fcc04 100644 --- a/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.settings; +import org.apache.logging.log4j.Level; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequestBuilder; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.elasticsearch.cluster.metadata.MetaData; @@ -329,29 +330,30 @@ public class ClusterSettingsIT extends ESIntegTestCase { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/20318") public void testLoggerLevelUpdate() { assertAcked(prepareCreate("test")); - final String rootLevel = ESLoggerFactory.getRootLogger().getLevel(); - final String testLevel = ESLoggerFactory.getLogger("test").getLevel(); - try { - client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put("logger._root", "BOOM")).execute().actionGet(); - fail("Expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertEquals("No enum constant org.elasticsearch.common.logging.ESLoggerFactory.LogLevel.BOOM", e.getMessage()); - } + final IllegalArgumentException e = + expectThrows( + IllegalArgumentException.class, + () -> client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put("logger._root", "BOOM")).execute().actionGet()); + assertEquals("Unknown level constant [BOOM].", e.getMessage()); try { - client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put("logger.test", "TRACE").put("logger._root", "trace")).execute().actionGet(); - assertEquals("TRACE", ESLoggerFactory.getLogger("test").getLevel()); - assertEquals("TRACE", ESLoggerFactory.getRootLogger().getLevel()); + final Settings.Builder testSettings = Settings.builder().put("logger.test", "TRACE").put("logger._root", "trace"); + client().admin().cluster().prepareUpdateSettings().setTransientSettings(testSettings).execute().actionGet(); + assertEquals(Level.TRACE, ESLoggerFactory.getLogger("test").getLevel()); + assertEquals(Level.TRACE, ESLoggerFactory.getRootLogger().getLevel()); } finally { if (randomBoolean()) { - client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().putNull("logger.test").putNull("logger._root")).execute().actionGet(); + final Settings.Builder defaultSettings = Settings.builder().putNull("logger.test").putNull("logger._root"); + 
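The MockLogAppender hunks above show why a TestLoggers helper is needed: log4j 1.x let tests attach appenders straight onto any Logger, but in log4j2 only the core implementation class (org.apache.logging.log4j.core.Logger) exposes addAppender, and an appender must be started before it will accept events. A plausible minimal shape for such a helper, assuming the loggers are backed by log4j-core (the actual implementation is introduced elsewhere in this change and may differ):

    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.core.Appender;

    public final class TestLoggers {
        public static void addAppender(Logger logger, Appender appender) {
            appender.start(); // log4j2 rejects events from appenders that were never started
            ((org.apache.logging.log4j.core.Logger) logger).addAppender(appender);
        }

        public static void removeAppender(Logger logger, Appender appender) {
            ((org.apache.logging.log4j.core.Logger) logger).removeAppender(appender);
            appender.stop();
        }
    }
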
client().admin().cluster().prepareUpdateSettings().setTransientSettings(defaultSettings).execute().actionGet(); } else { - client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().putNull("logger.*")).execute().actionGet(); + final Settings.Builder defaultSettings = Settings.builder().putNull("logger.*"); + client().admin().cluster().prepareUpdateSettings().setTransientSettings(defaultSettings).execute().actionGet(); } - assertEquals(testLevel, ESLoggerFactory.getLogger("test").getLevel()); - assertEquals(rootLevel, ESLoggerFactory.getRootLogger().getLevel()); + assertEquals(ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.get(Settings.EMPTY), ESLoggerFactory.getLogger("test").getLevel()); + assertEquals(ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.get(Settings.EMPTY), ESLoggerFactory.getRootLogger().getLevel()); } } diff --git a/core/src/test/java/org/elasticsearch/common/logging/ESLoggerTests.java b/core/src/test/java/org/elasticsearch/common/logging/ESLoggerTests.java deleted file mode 100644 index 8826f456b63..00000000000 --- a/core/src/test/java/org/elasticsearch/common/logging/ESLoggerTests.java +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
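The expected failure message in testLoggerLevelUpdate changes because level strings are no longer validated against the removed ESLoggerFactory.LogLevel enum; "Unknown level constant [BOOM]." is what org.apache.logging.log4j.Level.valueOf produces, suggesting validation is now delegated to log4j2 itself. Likewise, getLevel() now returns a Level object rather than a String, so the assertions compare against Level.TRACE and against the LOG_DEFAULT_LEVEL_SETTING default instead of captured strings. A quick illustration of the new validation behavior:

    import org.apache.logging.log4j.Level;

    IllegalArgumentException e =
            expectThrows(IllegalArgumentException.class, () -> Level.valueOf("BOOM"));
    assertEquals("Unknown level constant [BOOM].", e.getMessage());
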
- */ - -package org.elasticsearch.common.logging; - -import org.apache.log4j.AppenderSkeleton; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; -import org.apache.log4j.spi.LocationInfo; -import org.apache.log4j.spi.LoggingEvent; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; -import org.elasticsearch.test.ESTestCase; -import org.junit.After; - -import java.nio.file.Path; -import java.util.ArrayList; -import java.util.List; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.notNullValue; - -public class ESLoggerTests extends ESTestCase { - - private ESLogger esTestLogger; - private TestAppender testAppender; - private String testLevel; - private DeprecationLogger deprecationLogger; - private TestAppender deprecationAppender; - - @Override - public void setUp() throws Exception { - super.setUp(); - this.testLevel = ESLoggerFactory.getLogger("test").getLevel(); - LogConfigurator.reset(); - Path configDir = getDataPath("config"); - // Need to set custom path.conf so we can use a custom logging.yml file for the test - Settings settings = Settings.builder() - .put(Environment.PATH_CONF_SETTING.getKey(), configDir.toAbsolutePath()) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .build(); - LogConfigurator.configure(settings, true); - - esTestLogger = ESLoggerFactory.getLogger("test"); - Logger testLogger = esTestLogger.getLogger(); - assertThat(testLogger.getLevel(), equalTo(Level.TRACE)); - testAppender = new TestAppender(); - testLogger.addAppender(testAppender); - - // deprecation setup, needs to be set to debug to log - deprecationLogger = ESLoggerFactory.getDeprecationLogger("test"); - deprecationAppender = new TestAppender(); - ESLogger logger = ESLoggerFactory.getLogger("deprecation.test"); - logger.setLevel("DEBUG"); - logger.getLogger().addAppender(deprecationAppender); - } - - @Override - @After - public void tearDown() throws Exception { - super.tearDown(); - esTestLogger.setLevel(testLevel); - Logger testLogger = esTestLogger.getLogger(); - testLogger.removeAppender(testAppender); - Logger deprecationLogger = ESLoggerFactory.getLogger("deprecation.test").getLogger(); - deprecationLogger.removeAppender(deprecationAppender); - } - - public void testLocationInfoTest() { - esTestLogger.error("This is an error"); - esTestLogger.warn("This is a warning"); - esTestLogger.info("This is an info"); - esTestLogger.debug("This is a debug"); - esTestLogger.trace("This is a trace"); - List events = testAppender.getEvents(); - assertThat(events, notNullValue()); - assertThat(events.size(), equalTo(5)); - LoggingEvent event = events.get(0); - assertThat(event, notNullValue()); - assertThat(event.getLevel(), equalTo(Level.ERROR)); - assertThat(event.getRenderedMessage(), equalTo("This is an error")); - LocationInfo locationInfo = event.getLocationInformation(); - assertThat(locationInfo, notNullValue()); - assertThat(locationInfo.getClassName(), equalTo(ESLoggerTests.class.getCanonicalName())); - assertThat(locationInfo.getMethodName(), equalTo("testLocationInfoTest")); - event = events.get(1); - assertThat(event, notNullValue()); - assertThat(event.getLevel(), equalTo(Level.WARN)); - assertThat(event.getRenderedMessage(), equalTo("This is a warning")); - locationInfo = event.getLocationInformation(); - assertThat(locationInfo, notNullValue()); - assertThat(locationInfo.getClassName(), equalTo(ESLoggerTests.class.getCanonicalName())); - 
assertThat(locationInfo.getMethodName(), equalTo("testLocationInfoTest")); - event = events.get(2); - assertThat(event, notNullValue()); - assertThat(event.getLevel(), equalTo(Level.INFO)); - assertThat(event.getRenderedMessage(), equalTo("This is an info")); - locationInfo = event.getLocationInformation(); - assertThat(locationInfo, notNullValue()); - assertThat(locationInfo.getClassName(), equalTo(ESLoggerTests.class.getCanonicalName())); - assertThat(locationInfo.getMethodName(), equalTo("testLocationInfoTest")); - event = events.get(3); - assertThat(event, notNullValue()); - assertThat(event.getLevel(), equalTo(Level.DEBUG)); - assertThat(event.getRenderedMessage(), equalTo("This is a debug")); - locationInfo = event.getLocationInformation(); - assertThat(locationInfo, notNullValue()); - assertThat(locationInfo.getClassName(), equalTo(ESLoggerTests.class.getCanonicalName())); - assertThat(locationInfo.getMethodName(), equalTo("testLocationInfoTest")); - event = events.get(4); - assertThat(event, notNullValue()); - assertThat(event.getLevel(), equalTo(Level.TRACE)); - assertThat(event.getRenderedMessage(), equalTo("This is a trace")); - locationInfo = event.getLocationInformation(); - assertThat(locationInfo, notNullValue()); - assertThat(locationInfo.getClassName(), equalTo(ESLoggerTests.class.getCanonicalName())); - assertThat(locationInfo.getMethodName(), equalTo("testLocationInfoTest")); - } - - public void testDeprecationLogger() { - deprecationLogger.deprecated("This is a deprecation message"); - List deprecationEvents = deprecationAppender.getEvents(); - LoggingEvent event = deprecationEvents.get(0); - assertThat(event, notNullValue()); - assertThat(event.getLevel(), equalTo(Level.WARN)); - assertThat(event.getRenderedMessage(), equalTo("This is a deprecation message")); - } - - private static class TestAppender extends AppenderSkeleton { - - private List events = new ArrayList<>(); - - @Override - public void close() { - } - - @Override - public boolean requiresLayout() { - return false; - } - - @Override - protected void append(LoggingEvent event) { - // Forces it to generate the location information - event.getLocationInformation(); - events.add(event); - } - - public List getEvents() { - return events; - } - } -} diff --git a/core/src/test/java/org/elasticsearch/common/logging/LoggingConfigurationTests.java b/core/src/test/java/org/elasticsearch/common/logging/LoggingConfigurationTests.java deleted file mode 100644 index 581a9599365..00000000000 --- a/core/src/test/java/org/elasticsearch/common/logging/LoggingConfigurationTests.java +++ /dev/null @@ -1,196 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.common.logging; - -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.StandardOpenOption; -import java.util.Arrays; - -import org.apache.log4j.Appender; -import org.apache.log4j.Logger; -import org.apache.log4j.MDC; -import org.elasticsearch.cli.MockTerminal; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; -import org.elasticsearch.node.internal.InternalSettingsPreparer; -import org.elasticsearch.test.ESTestCase; -import org.junit.Before; - -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; - -/** - * - */ -public class LoggingConfigurationTests extends ESTestCase { - - @Before - public void before() throws Exception { - LogConfigurator.reset(); - } - - public void testResolveMultipleConfigs() throws Exception { - String level = ESLoggerFactory.getLogger("test").getLevel(); - try { - Path configDir = getDataPath("config"); - Settings settings = Settings.builder() - .put(Environment.PATH_CONF_SETTING.getKey(), configDir.toAbsolutePath()) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .build(); - LogConfigurator.configure(settings, true); - - ESLogger esLogger = ESLoggerFactory.getLogger("test"); - Logger logger = esLogger.getLogger(); - Appender appender = logger.getAppender("console"); - assertThat(appender, notNullValue()); - - esLogger = ESLoggerFactory.getLogger("second"); - logger = esLogger.getLogger(); - appender = logger.getAppender("console2"); - assertThat(appender, notNullValue()); - - esLogger = ESLoggerFactory.getLogger("third"); - logger = esLogger.getLogger(); - appender = logger.getAppender("console3"); - assertThat(appender, notNullValue()); - } finally { - ESLoggerFactory.getLogger("test").setLevel(level); - } - } - - public void testResolveJsonLoggingConfig() throws Exception { - Path tmpDir = createTempDir(); - Path loggingConf = tmpDir.resolve(loggingConfiguration("json")); - Files.write(loggingConf, "{\"json\": \"foo\"}".getBytes(StandardCharsets.UTF_8)); - Environment environment = new Environment( - Settings.builder() - .put(Environment.PATH_CONF_SETTING.getKey(), tmpDir.toAbsolutePath()) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .build()); - - Settings.Builder builder = Settings.builder(); - LogConfigurator.resolveConfig(environment, builder); - - Settings logSettings = builder.build(); - assertThat(logSettings.get("json"), is("foo")); - } - - public void testResolveYamlLoggingConfig() throws Exception { - Path tmpDir = createTempDir(); - Path loggingConf1 = tmpDir.resolve(loggingConfiguration("yml")); - Path loggingConf2 = tmpDir.resolve(loggingConfiguration("yaml")); - Files.write(loggingConf1, "yml: bar".getBytes(StandardCharsets.UTF_8)); - Files.write(loggingConf2, "yaml: bar".getBytes(StandardCharsets.UTF_8)); - Environment environment = new Environment( - Settings.builder() - .put(Environment.PATH_CONF_SETTING.getKey(), tmpDir.toAbsolutePath()) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .build()); - - Settings.Builder builder = Settings.builder(); - LogConfigurator.resolveConfig(environment, builder); - - Settings logSettings = builder.build(); - assertThat(logSettings.get("yml"), is("bar")); - assertThat(logSettings.get("yaml"), is("bar")); - } - - public void testResolveConfigInvalidFilename() throws Exception { - Path tmpDir 
= createTempDir(); - Path invalidSuffix = tmpDir.resolve(loggingConfiguration(randomFrom(LogConfigurator.ALLOWED_SUFFIXES)) + randomInvalidSuffix()); - Files.write(invalidSuffix, "yml: bar".getBytes(StandardCharsets.UTF_8)); - Environment environment = new Environment( - Settings.builder() - .put(Environment.PATH_CONF_SETTING.getKey(), invalidSuffix.toAbsolutePath()) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .build()); - - Settings.Builder builder = Settings.builder(); - LogConfigurator.resolveConfig(environment, builder); - - Settings logSettings = builder.build(); - assertThat(logSettings.get("yml"), nullValue()); - } - - // tests that custom settings are not overwritten by settings in the config file - public void testResolveOrder() throws Exception { - Path tmpDir = createTempDir(); - Path loggingConf = tmpDir.resolve(loggingConfiguration("yaml")); - Files.write(loggingConf, "logger.test_resolve_order: INFO, file\n".getBytes(StandardCharsets.UTF_8)); - Files.write(loggingConf, "appender.file.type: file\n".getBytes(StandardCharsets.UTF_8), StandardOpenOption.APPEND); - Environment environment = InternalSettingsPreparer.prepareEnvironment( - Settings.builder() - .put(Environment.PATH_CONF_SETTING.getKey(), tmpDir.toAbsolutePath()) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .put("logger.test_resolve_order", "TRACE, console") - .put("appender.console.type", "console") - .put("appender.console.layout.type", "consolePattern") - .put("appender.console.layout.conversionPattern", "[%d{ISO8601}][%-5p][%-25c] %m%n") - .build(), new MockTerminal()); - LogConfigurator.configure(environment.settings(), true); - // args should overwrite whatever is in the config - ESLogger esLogger = ESLoggerFactory.getLogger("test_resolve_order"); - Logger logger = esLogger.getLogger(); - Appender appender = logger.getAppender("console"); - assertThat(appender, notNullValue()); - assertTrue(logger.isTraceEnabled()); - appender = logger.getAppender("file"); - assertThat(appender, nullValue()); - } - - // tests that config file is not read when we call LogConfigurator.configure(Settings, false) - public void testConfigNotRead() throws Exception { - Path tmpDir = createTempDir(); - Path loggingConf = tmpDir.resolve(loggingConfiguration("yaml")); - Files.write(loggingConf, - Arrays.asList( - "logger.test_config_not_read: INFO, console", - "appender.console.type: console"), - StandardCharsets.UTF_8); - Environment environment = InternalSettingsPreparer.prepareEnvironment( - Settings.builder() - .put(Environment.PATH_CONF_SETTING.getKey(), tmpDir.toAbsolutePath()) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .build(), new MockTerminal()); - LogConfigurator.configure(environment.settings(), false); - ESLogger esLogger = ESLoggerFactory.getLogger("test_config_not_read"); - - assertNotNull(esLogger); - Logger logger = esLogger.getLogger(); - Appender appender = logger.getAppender("console"); - // config was not read - assertNull(appender); - } - - private static String loggingConfiguration(String suffix) { - return "logging." + randomAsciiOfLength(randomIntBetween(0, 10)) + "." 
+ suffix; - } - - private static String randomInvalidSuffix() { - String randomSuffix; - do { - randomSuffix = randomAsciiOfLength(randomIntBetween(1, 5)); - } while (LogConfigurator.ALLOWED_SUFFIXES.contains(randomSuffix)); - return randomSuffix; - } -} diff --git a/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java b/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java index dee20d6b32e..bd6cb530b2b 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java @@ -18,11 +18,13 @@ */ package org.elasticsearch.common.settings; +import org.apache.logging.log4j.Level; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.index.IndexModule; import org.elasticsearch.test.ESTestCase; @@ -302,45 +304,44 @@ public class ScopedSettingsTests extends ESTestCase { } public void testLoggingUpdates() { - final String level = ESLoggerFactory.getRootLogger().getLevel(); - final String testLevel = ESLoggerFactory.getLogger("test").getLevel(); - String property = randomFrom(ESLoggerFactory.LogLevel.values()).toString(); + final Level level = ESLoggerFactory.getRootLogger().getLevel(); + final Level testLevel = ESLoggerFactory.getLogger("test").getLevel(); + Level property = randomFrom(Level.values()); Settings.Builder builder = Settings.builder().put("logger.level", property); try { ClusterSettings settings = new ClusterSettings(builder.build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - try { - settings.validate(Settings.builder().put("logger._root", "boom").build()); - fail(); - } catch (IllegalArgumentException ex) { - assertEquals("No enum constant org.elasticsearch.common.logging.ESLoggerFactory.LogLevel.BOOM", ex.getMessage()); - } + IllegalArgumentException ex = + expectThrows( + IllegalArgumentException.class, + () -> settings.validate(Settings.builder().put("logger._root", "boom").build())); + assertEquals("Unknown level constant [BOOM].", ex.getMessage()); assertEquals(level, ESLoggerFactory.getRootLogger().getLevel()); settings.applySettings(Settings.builder().put("logger._root", "TRACE").build()); - assertEquals("TRACE", ESLoggerFactory.getRootLogger().getLevel()); + assertEquals(Level.TRACE, ESLoggerFactory.getRootLogger().getLevel()); settings.applySettings(Settings.builder().build()); assertEquals(property, ESLoggerFactory.getRootLogger().getLevel()); settings.applySettings(Settings.builder().put("logger.test", "TRACE").build()); - assertEquals("TRACE", ESLoggerFactory.getLogger("test").getLevel()); + assertEquals(Level.TRACE, ESLoggerFactory.getLogger("test").getLevel()); settings.applySettings(Settings.builder().build()); - assertEquals(testLevel, ESLoggerFactory.getLogger("test").getLevel()); + assertEquals(property, ESLoggerFactory.getLogger("test").getLevel()); } finally { - ESLoggerFactory.getRootLogger().setLevel(level); - ESLoggerFactory.getLogger("test").setLevel(testLevel); + Loggers.setLevel(ESLoggerFactory.getRootLogger(), level); + Loggers.setLevel(ESLoggerFactory.getLogger("test"), testLevel); } } public void 
testFallbackToLoggerLevel() { - final String level = ESLoggerFactory.getRootLogger().getLevel(); + final Level level = ESLoggerFactory.getRootLogger().getLevel(); try { - ClusterSettings settings = new ClusterSettings(Settings.builder().put("logger.level", "ERROR").build(), - ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + ClusterSettings settings = + new ClusterSettings(Settings.builder().put("logger.level", "ERROR").build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); assertEquals(level, ESLoggerFactory.getRootLogger().getLevel()); settings.applySettings(Settings.builder().put("logger._root", "TRACE").build()); - assertEquals("TRACE", ESLoggerFactory.getRootLogger().getLevel()); + assertEquals(Level.TRACE, ESLoggerFactory.getRootLogger().getLevel()); settings.applySettings(Settings.builder().build()); // here we fall back to 'logger.level' which is our default. - assertEquals("ERROR", ESLoggerFactory.getRootLogger().getLevel()); + assertEquals(Level.ERROR, ESLoggerFactory.getRootLogger().getLevel()); } finally { - ESLoggerFactory.getRootLogger().setLevel(level); + Loggers.setLevel(ESLoggerFactory.getRootLogger(), level); } } diff --git a/core/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java b/core/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java index 692134916ef..dc0545624d6 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java @@ -144,14 +144,9 @@ public class SettingsModuleTests extends ModuleTestCase { { Settings settings = Settings.builder().put("logger._root", "BOOM").put("logger.transport", "WOW").build(); - try { - new SettingsModule(settings); - fail(); - } catch (IllegalArgumentException ex) { - assertEquals("No enum constant org.elasticsearch.common.logging.ESLoggerFactory.LogLevel.BOOM", ex.getMessage()); - } + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> new SettingsModule(settings)); + assertEquals("Unknown level constant [BOOM].", ex.getMessage()); } - } public void testRegisterSettingsFilter() { diff --git a/core/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java b/core/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java index b075e9d56d7..5104e56cb35 100644 --- a/core/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java +++ b/core/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java @@ -20,9 +20,13 @@ package org.elasticsearch.common.unit; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.test.ESTestCase; import org.hamcrest.MatcherAssert; +import java.io.IOException; + import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -165,4 +169,15 @@ public class ByteSizeValueTests extends ESTestCase { assertThat(e.getMessage(), containsString("failed to parse setting [test]")); } } + + public void testSerialization() throws IOException { + ByteSizeValue byteSizeValue = new ByteSizeValue(randomPositiveLong(), randomFrom(ByteSizeUnit.values())); + try (BytesStreamOutput out = new BytesStreamOutput()) { + byteSizeValue.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + ByteSizeValue deserializedByteSizeValue = new ByteSizeValue(in); + assertEquals(byteSizeValue, 
deserializedByteSizeValue); + } + } + } } diff --git a/core/src/test/java/org/elasticsearch/common/util/concurrent/AbstractLifecycleRunnableTests.java b/core/src/test/java/org/elasticsearch/common/util/concurrent/AbstractLifecycleRunnableTests.java index 02adb783197..3f712c44d3c 100644 --- a/core/src/test/java/org/elasticsearch/common/util/concurrent/AbstractLifecycleRunnableTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/concurrent/AbstractLifecycleRunnableTests.java @@ -18,11 +18,10 @@ */ package org.elasticsearch.common.util.concurrent; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.SuppressLoggerChecks; import org.elasticsearch.common.component.Lifecycle; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.test.ESTestCase; - import org.mockito.InOrder; import java.util.concurrent.Callable; @@ -38,7 +37,7 @@ import static org.mockito.Mockito.when; */ public class AbstractLifecycleRunnableTests extends ESTestCase { private final Lifecycle lifecycle = mock(Lifecycle.class); - private final ESLogger logger = mock(ESLogger.class); + private final Logger logger = mock(Logger.class); public void testDoRunOnlyRunsWhenNotStoppedOrClosed() throws Exception { Callable runCallable = mock(Callable.class); diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java b/core/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java index 461428581c5..a602e81b854 100644 --- a/core/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java +++ b/core/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java @@ -19,13 +19,19 @@ package org.elasticsearch.common.xcontent; +import com.fasterxml.jackson.core.JsonGenerator; + import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.test.ESTestCase; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.util.Map; public abstract class BaseXContentTestCase extends ESTestCase { @@ -156,4 +162,24 @@ public abstract class BaseXContentTestCase extends ESTestCase { assertNull(parser.nextToken()); } + + protected void doTestBigInteger(JsonGenerator generator, ByteArrayOutputStream os) throws Exception { + // Big integers cannot be handled explicitly, but if some values happen to be big ints, + // we can still call parser.map() and get the bigint value so that eg. 
source filtering + // keeps working + BigInteger bigInteger = BigInteger.valueOf(Long.MAX_VALUE).add(BigInteger.ONE); + generator.writeStartObject(); + generator.writeFieldName("foo"); + generator.writeString("bar"); + generator.writeFieldName("bigint"); + generator.writeNumber(bigInteger); + generator.writeEndObject(); + generator.flush(); + byte[] serialized = os.toByteArray(); + + XContentParser parser = xcontentType().xContent().createParser(serialized); + Map map = parser.map(); + assertEquals("bar", map.get("foo")); + assertEquals(bigInteger, map.get("bigint")); + } } diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/cbor/CborXContentTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/cbor/CborXContentTests.java index 928b8a6a5a9..4b2e7a9695a 100644 --- a/core/src/test/java/org/elasticsearch/common/xcontent/cbor/CborXContentTests.java +++ b/core/src/test/java/org/elasticsearch/common/xcontent/cbor/CborXContentTests.java @@ -19,9 +19,14 @@ package org.elasticsearch.common.xcontent.cbor; +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.dataformat.cbor.CBORFactory; + import org.elasticsearch.common.xcontent.BaseXContentTestCase; import org.elasticsearch.common.xcontent.XContentType; +import java.io.ByteArrayOutputStream; + public class CborXContentTests extends BaseXContentTestCase { @Override @@ -29,4 +34,9 @@ public class CborXContentTests extends BaseXContentTestCase { return XContentType.CBOR; } + public void testBigInteger() throws Exception { + ByteArrayOutputStream os = new ByteArrayOutputStream(); + JsonGenerator generator = new CBORFactory().createGenerator(os); + doTestBigInteger(generator, os); + } } diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/json/JsonXContentTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/json/JsonXContentTests.java index 8a739eef4b8..4a79ddb4ec6 100644 --- a/core/src/test/java/org/elasticsearch/common/xcontent/json/JsonXContentTests.java +++ b/core/src/test/java/org/elasticsearch/common/xcontent/json/JsonXContentTests.java @@ -19,9 +19,14 @@ package org.elasticsearch.common.xcontent.json; +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.core.JsonGenerator; + import org.elasticsearch.common.xcontent.BaseXContentTestCase; import org.elasticsearch.common.xcontent.XContentType; +import java.io.ByteArrayOutputStream; + public class JsonXContentTests extends BaseXContentTestCase { @Override @@ -29,4 +34,9 @@ public class JsonXContentTests extends BaseXContentTestCase { return XContentType.JSON; } + public void testBigInteger() throws Exception { + ByteArrayOutputStream os = new ByteArrayOutputStream(); + JsonGenerator generator = new JsonFactory().createGenerator(os); + doTestBigInteger(generator, os); + } } diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/smile/SmileXContentTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/smile/SmileXContentTests.java index 6961e84416d..71f64ab5028 100644 --- a/core/src/test/java/org/elasticsearch/common/xcontent/smile/SmileXContentTests.java +++ b/core/src/test/java/org/elasticsearch/common/xcontent/smile/SmileXContentTests.java @@ -19,9 +19,14 @@ package org.elasticsearch.common.xcontent.smile; +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.dataformat.smile.SmileFactory; + import org.elasticsearch.common.xcontent.BaseXContentTestCase; import org.elasticsearch.common.xcontent.XContentType; +import java.io.ByteArrayOutputStream; + 
public class SmileXContentTests extends BaseXContentTestCase { @Override @@ -29,4 +34,9 @@ public class SmileXContentTests extends BaseXContentTestCase { return XContentType.SMILE; } + public void testBigInteger() throws Exception { + ByteArrayOutputStream os = new ByteArrayOutputStream(); + JsonGenerator generator = new SmileFactory().createGenerator(os); + doTestBigInteger(generator, os); + } } diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/yaml/YamlXContentTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/yaml/YamlXContentTests.java index 17c2a590ec1..3bfaa421882 100644 --- a/core/src/test/java/org/elasticsearch/common/xcontent/yaml/YamlXContentTests.java +++ b/core/src/test/java/org/elasticsearch/common/xcontent/yaml/YamlXContentTests.java @@ -19,9 +19,14 @@ package org.elasticsearch.common.xcontent.yaml; +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; + import org.elasticsearch.common.xcontent.BaseXContentTestCase; import org.elasticsearch.common.xcontent.XContentType; +import java.io.ByteArrayOutputStream; + public class YamlXContentTests extends BaseXContentTestCase { @Override @@ -29,4 +34,9 @@ public class YamlXContentTests extends BaseXContentTestCase { return XContentType.YAML; } + public void testBigInteger() throws Exception { + ByteArrayOutputStream os = new ByteArrayOutputStream(); + JsonGenerator generator = new YAMLFactory().createGenerator(os); + doTestBigInteger(generator, os); + } } diff --git a/core/src/test/java/org/elasticsearch/discovery/BlockingClusterStatePublishResponseHandlerTests.java b/core/src/test/java/org/elasticsearch/discovery/BlockingClusterStatePublishResponseHandlerTests.java index 4ff4c4cd035..481d15020fc 100644 --- a/core/src/test/java/org/elasticsearch/discovery/BlockingClusterStatePublishResponseHandlerTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/BlockingClusterStatePublishResponseHandlerTests.java @@ -18,9 +18,9 @@ */ package org.elasticsearch.discovery; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; @@ -45,10 +45,10 @@ public class BlockingClusterStatePublishResponseHandlerTests extends ESTestCase final boolean fail; final DiscoveryNode node; final CyclicBarrier barrier; - final ESLogger logger; + final Logger logger; final BlockingClusterStatePublishResponseHandler handler; - public PublishResponder(boolean fail, DiscoveryNode node, CyclicBarrier barrier, ESLogger logger, BlockingClusterStatePublishResponseHandler handler) { + public PublishResponder(boolean fail, DiscoveryNode node, CyclicBarrier barrier, Logger logger, BlockingClusterStatePublishResponseHandler handler) { this.fail = fail; this.node = node; diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java index 517bd2c7383..fe06f9a85d4 100644 --- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java @@ -19,6 +19,8 @@ package org.elasticsearch.discovery; +import org.apache.logging.log4j.message.ParameterizedMessage; 
+import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.CorruptIndexException; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.DocWriteResponse; @@ -69,11 +71,11 @@ import org.elasticsearch.test.disruption.IntermittentLongGCDisruption; import org.elasticsearch.test.disruption.LongGCDisruption; import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.disruption.NetworkDisruption.Bridge; +import org.elasticsearch.test.disruption.NetworkDisruption.DisruptedLinks; import org.elasticsearch.test.disruption.NetworkDisruption.NetworkDelay; import org.elasticsearch.test.disruption.NetworkDisruption.NetworkDisconnect; import org.elasticsearch.test.disruption.NetworkDisruption.NetworkLinkDisruptionType; import org.elasticsearch.test.disruption.NetworkDisruption.NetworkUnresponsive; -import org.elasticsearch.test.disruption.NetworkDisruption.DisruptedLinks; import org.elasticsearch.test.disruption.NetworkDisruption.TwoPartitions; import org.elasticsearch.test.disruption.ServiceDisruptionScheme; import org.elasticsearch.test.disruption.SingleNodeDisruption; @@ -507,7 +509,10 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { logger.trace("[{}] indexed id [{}] through node [{}]", name, id, node); } catch (ElasticsearchException e) { exceptedExceptions.add(e); - logger.trace("[{}] failed id [{}] through node [{}]", e, name, id, node); + final String docId = id; + logger.trace( + (Supplier) + () -> new ParameterizedMessage("[{}] failed id [{}] through node [{}]", name, docId, node), e); } finally { countDownLatchRef.get().countDown(); logger.trace("[{}] decreased counter : {}", name, countDownLatchRef.get().getCount()); @@ -715,7 +720,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { @Override public void onFailure(String source, Exception e) { - logger.warn("failure [{}]", e, source); + logger.warn((Supplier) () -> new ParameterizedMessage("failure [{}]", source), e); } }); diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java index 4b37b106dbc..9a111152a1a 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.discovery.zen; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; @@ -76,6 +78,7 @@ import java.util.stream.StreamSupport; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static java.util.Collections.shuffle; +import static org.elasticsearch.cluster.ESAllocationTestCase.createAllocationService; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_CREATION_DATE; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; @@ -83,7 +86,6 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_C import static org.elasticsearch.cluster.routing.RoutingTableTests.updateActiveAllocations; import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; import static 
org.elasticsearch.test.ClusterServiceUtils.setState; -import static org.elasticsearch.cluster.ESAllocationTestCase.createAllocationService; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -722,7 +724,7 @@ public class NodeJoinControllerTests extends ESTestCase { @Override public void onFailure(Exception e) { - logger.error("unexpected error for {}", e, future); + logger.error((Supplier) () -> new ParameterizedMessage("unexpected error for {}", future), e); future.markAsFailed(e); } }); diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java index b1658845afd..31c828ec30f 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.discovery.zen.publish; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -36,7 +37,6 @@ import org.elasticsearch.common.Randomness; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.Discovery; @@ -98,9 +98,9 @@ public class PublishClusterStateActionTests extends ESTestCase { public volatile ClusterState clusterState; - private final ESLogger logger; + private final Logger logger; - public MockNode(DiscoveryNode discoveryNode, MockTransportService service, @Nullable ClusterStateListener listener, ESLogger logger) { + public MockNode(DiscoveryNode discoveryNode, MockTransportService service, @Nullable ClusterStateListener listener, Logger logger) { this.discoveryNode = discoveryNode; this.service = service; this.listener = listener; diff --git a/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index b6ec9c6e932..9b340fd863a 100644 --- a/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.gateway; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; @@ -33,7 +34,6 @@ import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -64,7 +64,7 @@ import static org.hamcrest.Matchers.nullValue; @ClusterScope(scope = Scope.TEST, numDataNodes = 0) public class GatewayIndexStateIT extends ESIntegTestCase { - private final ESLogger logger = Loggers.getLogger(GatewayIndexStateIT.class); + private final 
Logger logger = Loggers.getLogger(GatewayIndexStateIT.class); public void testMappingMetaDataParsed() throws Exception { logger.info("--> starting 1 nodes"); diff --git a/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java b/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java index 4cf505d839a..0f0e69b2643 100644 --- a/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.gateway; +import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.store.ChecksumIndexInput; import org.apache.lucene.store.Directory; @@ -34,7 +35,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -193,7 +193,7 @@ public class MetaDataStateFormatTests extends ESTestCase { } } - public static void corruptFile(Path file, ESLogger logger) throws IOException { + public static void corruptFile(Path file, Logger logger) throws IOException { Path fileToCorrupt = file; try (final SimpleFSDirectory dir = new SimpleFSDirectory(fileToCorrupt.getParent())) { long checksumBeforeCorruption; diff --git a/core/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java b/core/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java index a6f4e6db23b..6454f8a2209 100644 --- a/core/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java +++ b/core/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java @@ -19,11 +19,11 @@ package org.elasticsearch.gateway; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; import org.elasticsearch.action.admin.indices.stats.IndexStats; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.indices.recovery.RecoveryState; @@ -56,7 +56,7 @@ public class ReusePeerRecoverySharedTest { * should this use synced flush? can't use synced from in the bwc * tests */ - public static void testCase(Settings indexSettings, Runnable restartCluster, ESLogger logger, boolean useSyncIds) { + public static void testCase(Settings indexSettings, Runnable restartCluster, Logger logger, boolean useSyncIds) { /* * prevent any rebalance actions during the peer recovery if we run into * a relocation the reuse count will be 0 and this fails the test. 
We diff --git a/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java b/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java index 4d962fb6c88..989c60fc916 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java +++ b/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java @@ -594,6 +594,12 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase { * Tests that shadow replicas can be "naturally" rebalanced and relocated * around the cluster. By "naturally" I mean without using the reroute API */ + // This test failed on CI when trying to assert that all the shard data has been deleted + // from the index path. It has not been reproduced locally. Despite the IndicesService + // deleting the index and hence, deleting all the shard data for the index, the test + // failure still showed some Lucene files in the data directory for that index. Not sure + // why that is, so turning on more logging here. + @TestLogging("indices:TRACE,env:TRACE") public void testShadowReplicaNaturalRelocation() throws Exception { Path dataPath = createTempDir(); Settings nodeSettings = nodeSettings(dataPath); diff --git a/core/src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTests.java b/core/src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTests.java index 9e4d5b27ad7..de271d720c3 100644 --- a/core/src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTests.java +++ b/core/src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTests.java @@ -19,12 +19,12 @@ package org.elasticsearch.index.analysis.synonyms; +import org.apache.logging.log4j.Logger; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.all.AllTokenStream; import org.elasticsearch.common.settings.Settings; @@ -45,7 +45,7 @@ import static org.hamcrest.Matchers.equalTo; /** */ public class SynonymsAnalysisTests extends ESTestCase { - protected final ESLogger logger = Loggers.getLogger(getClass()); + protected final Logger logger = Loggers.getLogger(getClass()); private AnalysisService analysisService; public void testSynonymsAnalysis() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 4715264d986..7b62966515d 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -20,11 +20,12 @@ package org.elasticsearch.index.engine; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import org.apache.log4j.AppenderSkeleton; -import org.apache.log4j.Level; -import org.apache.log4j.LogManager; -import org.apache.log4j.Logger; -import org.apache.log4j.spi.LoggingEvent; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.LogEvent; +import org.apache.logging.log4j.core.appender.AbstractAppender; +import org.apache.logging.log4j.core.filter.RegexFilter; import org.apache.lucene.codecs.Codec; 
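The import swap at the top of InternalEngineTests signals the third recurring migration in this patch: log4j 1.x capture appenders built on AppenderSkeleton (consuming LoggingEvent) become log4j2 appenders built on AbstractAppender (consuming LogEvent), optionally guarded by a RegexFilter, and they must participate in log4j2's start/stop lifecycle. A minimal capture appender in that style (hypothetical; the class used later in this file may differ):

    import java.util.List;
    import java.util.concurrent.CopyOnWriteArrayList;

    import org.apache.logging.log4j.core.LogEvent;
    import org.apache.logging.log4j.core.appender.AbstractAppender;

    class CapturingAppender extends AbstractAppender {
        private final List<LogEvent> events = new CopyOnWriteArrayList<>();

        CapturingAppender(String name) {
            super(name, null, null); // no filter, no layout: we only collect events
        }

        @Override
        public void append(LogEvent event) {
            events.add(event);
        }

        List<LogEvent> getEvents() {
            return events;
        }
    }
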
import org.apache.lucene.document.Field; import org.apache.lucene.document.NumericDocValuesField; @@ -52,14 +53,16 @@ import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.TestUtil; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.FileSystemUtils; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.PrefixMessageFactory; +import org.elasticsearch.common.logging.TestLoggers; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; @@ -96,11 +99,9 @@ import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.TranslogConfig; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.mapper.MapperRegistry; -import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.DummyShardLock; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; -import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.hamcrest.MatcherAssert; @@ -116,7 +117,6 @@ import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.Base64; -import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Locale; @@ -172,10 +172,10 @@ public class InternalEngineTests extends ESTestCase { codecName = "default"; } defaultSettings = IndexSettingsModule.newIndexSettings("test", Settings.builder() - .put(IndexSettings.INDEX_GC_DELETES_SETTING, "1h") // make sure this doesn't kick in on us + .put(IndexSettings.INDEX_GC_DELETES_SETTING.getKey(), "1h") // make sure this doesn't kick in on us .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), codecName) .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD, + .put(IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD.getKey(), between(10, 10 * IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD.get(Settings.EMPTY))) .build()); // TODO randomize more settings threadPool = new TestThreadPool(getClass().getName()); @@ -207,7 +207,8 @@ public class InternalEngineTests extends ESTestCase { return new EngineConfig(openMode, config.getShardId(), config.getThreadPool(), config.getIndexSettings(), config.getWarmer(), config.getStore(), config.getDeletionPolicy(), config.getMergePolicy(), config.getAnalyzer(), config.getSimilarity(), new CodecService(null, logger), config.getEventListener(), config.getTranslogRecoveryPerformer(), config.getQueryCache(), - config.getQueryCachingPolicy(), config.getTranslogConfig(), config.getFlushMergesAfter(), config.getRefreshListeners()); + config.getQueryCachingPolicy(), config.getTranslogConfig(), config.getFlushMergesAfter(), config.getRefreshListeners(), + config.getMaxUnsafeAutoIdTimestamp()); } @Override @@ -277,7 +278,7 @@ public class InternalEngineTests extends ESTestCase { } protected InternalEngine 
createEngine(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy) throws IOException { - EngineConfig config = config(indexSettings, store, translogPath, mergePolicy); + EngineConfig config = config(indexSettings, store, translogPath, mergePolicy, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP); InternalEngine internalEngine = new InternalEngine(config); if (config.getOpenMode() == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) { internalEngine.recoverFromTranslog(); @@ -285,7 +286,7 @@ public class InternalEngineTests extends ESTestCase { return internalEngine; } - public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy) { + public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy, long maxUnsafeAutoIdTimestamp) { IndexWriterConfig iwc = newIndexWriterConfig(); TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE); final EngineConfig.OpenMode openMode; @@ -307,7 +308,7 @@ public class InternalEngineTests extends ESTestCase { EngineConfig config = new EngineConfig(openMode, shardId, threadPool, indexSettings, null, store, createSnapshotDeletionPolicy(), mergePolicy, iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), listener, new TranslogHandler(shardId.getIndexName(), logger), IndexSearcher.getDefaultQueryCache(), - IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5), null); + IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5), null, maxUnsafeAutoIdTimestamp); return config; } @@ -613,7 +614,7 @@ public class InternalEngineTests extends ESTestCase { for (int i = 0; i < ops; i++) { final ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null); if (randomBoolean()) { - final Engine.Index operation = new Engine.Index(newUid("test#1"), doc, i, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime()); + final Engine.Index operation = new Engine.Index(newUid("test#1"), doc, i, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false); operations.add(operation); initialEngine.index(operation); } else { @@ -904,7 +905,7 @@ public class InternalEngineTests extends ESTestCase { public void testSyncedFlush() throws IOException { try (Store store = createStore(); Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), - new LogByteSizeMergePolicy()))) { + new LogByteSizeMergePolicy(), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP))) { final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20); ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null); engine.index(new Engine.Index(newUid("1"), doc)); @@ -931,7 +932,7 @@ public class InternalEngineTests extends ESTestCase { for (int i = 0; i < iters; i++) { try (Store store = createStore(); InternalEngine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), - new LogDocMergePolicy()))) { + new LogDocMergePolicy(), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP))) { final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20); ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null); Engine.Index doc1 = new 
Engine.Index(newUid("1"), doc); @@ -944,7 +945,7 @@ public class InternalEngineTests extends ESTestCase { engine.flush(); final boolean forceMergeFlushes = randomBoolean(); if (forceMergeFlushes) { - engine.index(new Engine.Index(newUid("3"), doc, Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime() - engine.engineConfig.getFlushMergesAfter().nanos())); + engine.index(new Engine.Index(newUid("3"), doc, Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime() - engine.engineConfig.getFlushMergesAfter().nanos(), -1, false)); } else { engine.index(new Engine.Index(newUid("3"), doc)); } @@ -1031,7 +1032,7 @@ public class InternalEngineTests extends ESTestCase { engine.index(create); assertThat(create.version(), equalTo(1L)); - create = new Engine.Index(newUid("1"), doc, create.version(), create.versionType().versionTypeForReplicationAndRecovery(), REPLICA, 0); + create = new Engine.Index(newUid("1"), doc, create.version(), create.versionType().versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false); replicaEngine.index(create); assertThat(create.version(), equalTo(1L)); } @@ -1042,18 +1043,18 @@ public class InternalEngineTests extends ESTestCase { engine.index(index); assertThat(index.version(), equalTo(1L)); - index = new Engine.Index(newUid("1"), doc, index.version(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, 0); + index = new Engine.Index(newUid("1"), doc, index.version(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false); replicaEngine.index(index); assertThat(index.version(), equalTo(1L)); } public void testExternalVersioningNewIndex() { ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null); - Engine.Index index = new Engine.Index(newUid("1"), doc, 12, VersionType.EXTERNAL, PRIMARY, 0); + Engine.Index index = new Engine.Index(newUid("1"), doc, 12, VersionType.EXTERNAL, PRIMARY, 0, -1, false); engine.index(index); assertThat(index.version(), equalTo(12L)); - index = new Engine.Index(newUid("1"), doc, index.version(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, 0); + index = new Engine.Index(newUid("1"), doc, index.version(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false); replicaEngine.index(index); assertThat(index.version(), equalTo(12L)); } @@ -1068,7 +1069,7 @@ public class InternalEngineTests extends ESTestCase { engine.index(index); assertThat(index.version(), equalTo(2L)); - index = new Engine.Index(newUid("1"), doc, 1L, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, 0); + index = new Engine.Index(newUid("1"), doc, 1L, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, 0, -1, false); try { engine.index(index); fail(); @@ -1077,7 +1078,7 @@ public class InternalEngineTests extends ESTestCase { } // future versions should not work as well - index = new Engine.Index(newUid("1"), doc, 3L, VersionType.INTERNAL, PRIMARY, 0); + index = new Engine.Index(newUid("1"), doc, 3L, VersionType.INTERNAL, PRIMARY, 0, -1, false); try { engine.index(index); fail(); @@ -1088,15 +1089,15 @@ public class InternalEngineTests extends ESTestCase { public void testExternalVersioningIndexConflict() { ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null); - Engine.Index index = new Engine.Index(newUid("1"), doc, 12, VersionType.EXTERNAL, PRIMARY, 0); + Engine.Index index = new Engine.Index(newUid("1"), doc, 
12, VersionType.EXTERNAL, PRIMARY, 0, -1, false); engine.index(index); assertThat(index.version(), equalTo(12L)); - index = new Engine.Index(newUid("1"), doc, 14, VersionType.EXTERNAL, PRIMARY, 0); + index = new Engine.Index(newUid("1"), doc, 14, VersionType.EXTERNAL, PRIMARY, 0, -1, false); engine.index(index); assertThat(index.version(), equalTo(14L)); - index = new Engine.Index(newUid("1"), doc, 13, VersionType.EXTERNAL, PRIMARY, 0); + index = new Engine.Index(newUid("1"), doc, 13, VersionType.EXTERNAL, PRIMARY, 0, -1, false); try { engine.index(index); fail(); @@ -1117,7 +1118,7 @@ public class InternalEngineTests extends ESTestCase { engine.flush(); - index = new Engine.Index(newUid("1"), doc, 1L, VersionType.INTERNAL, PRIMARY, 0); + index = new Engine.Index(newUid("1"), doc, 1L, VersionType.INTERNAL, PRIMARY, 0, -1, false); try { engine.index(index); fail(); @@ -1126,7 +1127,7 @@ public class InternalEngineTests extends ESTestCase { } // future versions should not work as well - index = new Engine.Index(newUid("1"), doc, 3L, VersionType.INTERNAL, PRIMARY, 0); + index = new Engine.Index(newUid("1"), doc, 3L, VersionType.INTERNAL, PRIMARY, 0, -1, false); try { engine.index(index); fail(); @@ -1137,17 +1138,17 @@ public class InternalEngineTests extends ESTestCase { public void testExternalVersioningIndexConflictWithFlush() { ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null); - Engine.Index index = new Engine.Index(newUid("1"), doc, 12, VersionType.EXTERNAL, PRIMARY, 0); + Engine.Index index = new Engine.Index(newUid("1"), doc, 12, VersionType.EXTERNAL, PRIMARY, 0, -1, false); engine.index(index); assertThat(index.version(), equalTo(12L)); - index = new Engine.Index(newUid("1"), doc, 14, VersionType.EXTERNAL, PRIMARY, 0); + index = new Engine.Index(newUid("1"), doc, 14, VersionType.EXTERNAL, PRIMARY, 0, -1, false); engine.index(index); assertThat(index.version(), equalTo(14L)); engine.flush(); - index = new Engine.Index(newUid("1"), doc, 13, VersionType.EXTERNAL, PRIMARY, 0); + index = new Engine.Index(newUid("1"), doc, 13, VersionType.EXTERNAL, PRIMARY, 0, -1, false); try { engine.index(index); fail(); @@ -1159,7 +1160,7 @@ public class InternalEngineTests extends ESTestCase { public void testForceMerge() throws IOException { try (Store store = createStore(); Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), - new LogByteSizeMergePolicy()))) { // use log MP here we test some behavior in ESMP + new LogByteSizeMergePolicy(), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP))) { // use log MP here we test some behavior in ESMP int numDocs = randomIntBetween(10, 100); for (int i = 0; i < numDocs; i++) { ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), B_1, null); @@ -1284,7 +1285,7 @@ public class InternalEngineTests extends ESTestCase { assertThat(delete.version(), equalTo(3L)); // now check if we can index to a delete doc with version - index = new Engine.Index(newUid("1"), doc, 2L, VersionType.INTERNAL, PRIMARY, 0); + index = new Engine.Index(newUid("1"), doc, 2L, VersionType.INTERNAL, PRIMARY, 0, -1, false); try { engine.index(index); fail(); @@ -1293,7 +1294,7 @@ public class InternalEngineTests extends ESTestCase { } // we shouldn't be able to create as well - Engine.Index create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0); + Engine.Index create = new Engine.Index(newUid("1"), doc, 
Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0, -1, false); try { engine.index(create); } catch (VersionConflictEngineException e) { @@ -1340,7 +1341,7 @@ public class InternalEngineTests extends ESTestCase { engine.flush(); // now check if we can index to a delete doc with version - index = new Engine.Index(newUid("1"), doc, 2L, VersionType.INTERNAL, PRIMARY, 0); + index = new Engine.Index(newUid("1"), doc, 2L, VersionType.INTERNAL, PRIMARY, 0, -1, false); try { engine.index(index); fail(); @@ -1349,7 +1350,7 @@ public class InternalEngineTests extends ESTestCase { } // we shouldn't be able to create as well - Engine.Index create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0); + Engine.Index create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0, -1, false); try { engine.index(create); } catch (VersionConflictEngineException e) { @@ -1359,11 +1360,11 @@ public class InternalEngineTests extends ESTestCase { public void testVersioningCreateExistsException() { ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null); - Engine.Index create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0); + Engine.Index create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0, -1, false); engine.index(create); assertThat(create.version(), equalTo(1L)); - create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0); + create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0, -1, false); try { engine.index(create); fail(); @@ -1374,13 +1375,13 @@ public class InternalEngineTests extends ESTestCase { public void testVersioningCreateExistsExceptionWithFlush() { ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null); - Engine.Index create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0); + Engine.Index create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0, -1, false); engine.index(create); assertThat(create.version(), equalTo(1L)); engine.flush(); - create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0); + create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0, -1, false); try { engine.index(create); fail(); @@ -1400,12 +1401,12 @@ public class InternalEngineTests extends ESTestCase { assertThat(index.version(), equalTo(2L)); // apply the second index to the replica, should work fine - index = new Engine.Index(newUid("1"), doc, index.version(), VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0); + index = new Engine.Index(newUid("1"), doc, index.version(), VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false); replicaEngine.index(index); assertThat(index.version(), equalTo(2L)); // now, the old one should not work - index = new Engine.Index(newUid("1"), doc, 1L, VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0); + index = new Engine.Index(newUid("1"), doc, 1L, VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false); try { replicaEngine.index(index); fail(); @@ -1416,7 +1417,7 @@ public class InternalEngineTests extends ESTestCase { // second version on replica should fail 
as well try { index = new Engine.Index(newUid("1"), doc, 2L - , VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0); + , VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false); replicaEngine.index(index); assertThat(index.version(), equalTo(2L)); } catch (VersionConflictEngineException e) { @@ -1432,7 +1433,7 @@ public class InternalEngineTests extends ESTestCase { // apply the first index to the replica, should work fine index = new Engine.Index(newUid("1"), doc, 1L - , VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0); + , VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false); replicaEngine.index(index); assertThat(index.version(), equalTo(1L)); @@ -1464,7 +1465,7 @@ public class InternalEngineTests extends ESTestCase { // now do the second index on the replica, it should fail try { - index = new Engine.Index(newUid("1"), doc, 2L, VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0); + index = new Engine.Index(newUid("1"), doc, 2L, VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false); replicaEngine.index(index); fail("excepted VersionConflictEngineException to be thrown"); } catch (VersionConflictEngineException e) { @@ -1504,16 +1505,21 @@ public class InternalEngineTests extends ESTestCase { assertTrue(index.isCreated()); } - private static class MockAppender extends AppenderSkeleton { + private static class MockAppender extends AbstractAppender { public boolean sawIndexWriterMessage; public boolean sawIndexWriterIFDMessage; + public MockAppender(final String name) throws IllegalAccessException { + super(name, RegexFilter.createFilter(".*(\n.*)*", new String[0], true, null, null), null); + } + @Override - protected void append(LoggingEvent event) { - if (event.getLevel() == Level.TRACE && event.getMessage().toString().contains("[index][1] ")) { + public void append(LogEvent event) { + final String formattedMessage = event.getMessage().getFormattedMessage(); + if (event.getLevel() == Level.TRACE && formattedMessage.contains("[index][1] ")) { if (event.getLoggerName().endsWith("lucene.iw") && - event.getMessage().toString().contains("IW: apply all deletes during flush")) { + formattedMessage.contains("IW: apply all deletes during flush")) { sawIndexWriterMessage = true; } if (event.getLoggerName().endsWith("lucene.iw.ifd")) { @@ -1521,28 +1527,20 @@ public class InternalEngineTests extends ESTestCase { } } } - - @Override - public boolean requiresLayout() { - return false; - } - - @Override - public void close() { - } } // #5891: make sure IndexWriter's infoStream output is // sent to lucene.iw with log level TRACE: - public void testIndexWriterInfoStream() { + public void testIndexWriterInfoStream() throws IllegalAccessException { assumeFalse("who tests the tester?", VERBOSE); - MockAppender mockAppender = new MockAppender(); + MockAppender mockAppender = new MockAppender("testIndexWriterInfoStream"); - Logger rootLogger = Logger.getRootLogger(); + Logger rootLogger = LogManager.getRootLogger(); Level savedLevel = rootLogger.getLevel(); - rootLogger.addAppender(mockAppender); - rootLogger.setLevel(Level.DEBUG); + TestLoggers.addAppender(rootLogger, mockAppender); + Loggers.setLevel(rootLogger, Level.DEBUG); + rootLogger = LogManager.getRootLogger(); try { // First, with DEBUG, which should NOT log IndexWriter output: @@ -1552,32 +1550,35 @@ public class InternalEngineTests extends ESTestCase { 
assertFalse(mockAppender.sawIndexWriterMessage); // Again, with TRACE, which should log IndexWriter output: - rootLogger.setLevel(Level.TRACE); + Loggers.setLevel(rootLogger, Level.TRACE); engine.index(new Engine.Index(newUid("2"), doc)); engine.flush(); assertTrue(mockAppender.sawIndexWriterMessage); } finally { - rootLogger.removeAppender(mockAppender); - rootLogger.setLevel(savedLevel); + TestLoggers.removeAppender(rootLogger, mockAppender); + Loggers.setLevel(rootLogger, savedLevel.toString()); } } // #8603: make sure we can separately log IFD's messages - public void testIndexWriterIFDInfoStream() { + public void testIndexWriterIFDInfoStream() throws IllegalAccessException { assumeFalse("who tests the tester?", VERBOSE); - MockAppender mockAppender = new MockAppender(); + MockAppender mockAppender = new MockAppender("testIndexWriterIFDInfoStream"); - // Works when running this test inside Intellij: - Logger iwIFDLogger = LogManager.exists("org.elasticsearch.index.engine.lucene.iw.ifd"); - if (iwIFDLogger == null) { - // Works when running this test from command line: - iwIFDLogger = LogManager.exists("index.engine.lucene.iw.ifd"); + final Logger iwIFDLogger; + if (LogManager.getContext(false).hasLogger("org.elasticsearch.index.engine.lucene.iw.ifd", new PrefixMessageFactory())) { + // Works when running this test inside Intellij: + iwIFDLogger = LogManager.getLogger("org.elasticsearch.index.engine.lucene.iw.ifd"); assertNotNull(iwIFDLogger); + } else { + // Works when running this test from command line: + assertTrue(LogManager.getContext(false).hasLogger("index.engine.lucene.iw.ifd", new PrefixMessageFactory())); + iwIFDLogger = LogManager.getLogger("index.engine.lucene.iw.ifd"); } - iwIFDLogger.addAppender(mockAppender); - iwIFDLogger.setLevel(Level.DEBUG); + TestLoggers.addAppender(iwIFDLogger, mockAppender); + Loggers.setLevel(iwIFDLogger, Level.DEBUG); try { // First, with DEBUG, which should NOT log IndexWriter output: @@ -1588,21 +1589,21 @@ public class InternalEngineTests extends ESTestCase { assertFalse(mockAppender.sawIndexWriterIFDMessage); // Again, with TRACE, which should only log IndexWriter IFD output: - iwIFDLogger.setLevel(Level.TRACE); + Loggers.setLevel(iwIFDLogger, Level.TRACE); engine.index(new Engine.Index(newUid("2"), doc)); engine.flush(); assertFalse(mockAppender.sawIndexWriterMessage); assertTrue(mockAppender.sawIndexWriterIFDMessage); } finally { - iwIFDLogger.removeAppender(mockAppender); - iwIFDLogger.setLevel(null); + TestLoggers.removeAppender(iwIFDLogger, mockAppender); + Loggers.setLevel(iwIFDLogger, (Level) null); } } public void testEnableGcDeletes() throws Exception { try (Store store = createStore(); - Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), newMergePolicy()))) { + Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), newMergePolicy(), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP))) { engine.config().setEnableGcDeletes(false); // Add document @@ -1610,7 +1611,7 @@ public class InternalEngineTests extends ESTestCase { document.add(new TextField("value", "test1", Field.Store.YES)); ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, document, B_2, null); - engine.index(new Engine.Index(newUid("1"), doc, 1, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime())); + engine.index(new Engine.Index(newUid("1"), doc, 1, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false)); // Delete document we just added: 
engine.delete(new Engine.Delete("test", "1", newUid("1"), 10, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), false)); @@ -1635,7 +1636,7 @@ public class InternalEngineTests extends ESTestCase { // Try to index uid=1 with a too-old version, should fail: try { - engine.index(new Engine.Index(newUid("1"), doc, 2, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime())); + engine.index(new Engine.Index(newUid("1"), doc, 2, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false)); fail("did not hit expected exception"); } catch (VersionConflictEngineException vcee) { // expected @@ -1647,7 +1648,7 @@ public class InternalEngineTests extends ESTestCase { // Try to index uid=2 with a too-old version, should fail: try { - engine.index(new Engine.Index(newUid("2"), doc, 2, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime())); + engine.index(new Engine.Index(newUid("2"), doc, 2, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false)); fail("did not hit expected exception"); } catch (VersionConflictEngineException vcee) { // expected @@ -1738,7 +1739,7 @@ public class InternalEngineTests extends ESTestCase { // expected } // now it should be OK. - EngineConfig config = copy(config(defaultSettings, store, primaryTranslogDir, newMergePolicy()), EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG); + EngineConfig config = copy(config(defaultSettings, store, primaryTranslogDir, newMergePolicy(), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP), EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG); engine = new InternalEngine(config); } @@ -1746,7 +1747,7 @@ public class InternalEngineTests extends ESTestCase { final int numDocs = randomIntBetween(1, 10); for (int i = 0; i < numDocs; i++) { ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null); - Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime()); + Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); engine.index(firstIndexRequest); assertThat(firstIndexRequest.version(), equalTo(1L)); } @@ -1796,7 +1797,7 @@ public class InternalEngineTests extends ESTestCase { final int numDocs = randomIntBetween(1, 10); for (int i = 0; i < numDocs; i++) { ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null); - Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime()); + Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); engine.index(firstIndexRequest); assertThat(firstIndexRequest.version(), equalTo(1L)); } @@ -1886,7 +1887,7 @@ public class InternalEngineTests extends ESTestCase { final int numExtraDocs = randomIntBetween(1, 10); for (int i = 0; i < numExtraDocs; i++) { ParsedDocument doc = testParsedDocument("extra" + Integer.toString(i), "extra" + Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null); - Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, 
Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime()); + Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); engine.index(firstIndexRequest); assertThat(firstIndexRequest.version(), equalTo(1L)); } @@ -1915,7 +1916,7 @@ public class InternalEngineTests extends ESTestCase { final int numDocs = randomIntBetween(1, 10); for (int i = 0; i < numDocs; i++) { ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null); - Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime()); + Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); engine.index(firstIndexRequest); assertThat(firstIndexRequest.version(), equalTo(1L)); } @@ -1958,7 +1959,7 @@ public class InternalEngineTests extends ESTestCase { int randomId = randomIntBetween(numDocs + 1, numDocs + 10); String uuidValue = "test#" + Integer.toString(randomId); ParsedDocument doc = testParsedDocument(uuidValue, Integer.toString(randomId), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null); - Engine.Index firstIndexRequest = new Engine.Index(newUid(uuidValue), doc, 1, VersionType.EXTERNAL, PRIMARY, System.nanoTime()); + Engine.Index firstIndexRequest = new Engine.Index(newUid(uuidValue), doc, 1, VersionType.EXTERNAL, PRIMARY, System.nanoTime(), -1, false); engine.index(firstIndexRequest); assertThat(firstIndexRequest.version(), equalTo(1L)); if (flush) { @@ -1966,7 +1967,7 @@ public class InternalEngineTests extends ESTestCase { } doc = testParsedDocument(uuidValue, Integer.toString(randomId), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null); - Engine.Index idxRequest = new Engine.Index(newUid(uuidValue), doc, 2, VersionType.EXTERNAL, PRIMARY, System.nanoTime()); + Engine.Index idxRequest = new Engine.Index(newUid(uuidValue), doc, 2, VersionType.EXTERNAL, PRIMARY, System.nanoTime(), -1, false); engine.index(idxRequest); engine.refresh("test"); assertThat(idxRequest.version(), equalTo(2L)); @@ -2003,7 +2004,7 @@ public class InternalEngineTests extends ESTestCase { public final AtomicInteger recoveredOps = new AtomicInteger(0); - public TranslogHandler(String indexName, ESLogger logger) { + public TranslogHandler(String indexName, Logger logger) { super(new ShardId("test", "_na_", 0), null, logger); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); RootObjectMapper.Builder rootBuilder = new RootObjectMapper.Builder("test"); @@ -2032,7 +2033,7 @@ public class InternalEngineTests extends ESTestCase { final int numDocs = randomIntBetween(1, 10); for (int i = 0; i < numDocs; i++) { ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null); - Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime()); + Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); engine.index(firstIndexRequest); assertThat(firstIndexRequest.version(), 
equalTo(1L)); } @@ -2058,7 +2059,7 @@ public class InternalEngineTests extends ESTestCase { config.getIndexSettings(), null, store, createSnapshotDeletionPolicy(), newMergePolicy(), config.getAnalyzer(), config.getSimilarity(), new CodecService(null, logger), config.getEventListener(), config.getTranslogRecoveryPerformer(), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, - TimeValue.timeValueMinutes(5), config.getRefreshListeners()); + TimeValue.timeValueMinutes(5), config.getRefreshListeners(), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP); try { InternalEngine internalEngine = new InternalEngine(brokenConfig); @@ -2113,12 +2114,12 @@ public class InternalEngineTests extends ESTestCase { public void testCurrentTranslogIDisCommitted() throws IOException { try (Store store = createStore()) { - EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy()); + EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP); // create { ParsedDocument doc = testParsedDocument(Integer.toString(0), Integer.toString(0), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null); - Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(0)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime()); + Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(0)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); try (InternalEngine engine = new InternalEngine(copy(config, EngineConfig.OpenMode.CREATE_INDEX_AND_TRANSLOG))){ engine.index(firstIndexRequest); @@ -2178,7 +2179,7 @@ public class InternalEngineTests extends ESTestCase { final int numDocs = randomIntBetween(2, 10); // at least 2 documents otherwise we don't see any deletes below for (int i = 0; i < numDocs; i++) { ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null); - Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime()); + Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); engine.index(firstIndexRequest); assertThat(firstIndexRequest.version(), equalTo(1L)); } @@ -2188,7 +2189,7 @@ public class InternalEngineTests extends ESTestCase { engine.forceMerge(randomBoolean(), 1, false, false, false); ParsedDocument doc = testParsedDocument(Integer.toString(0), Integer.toString(0), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null); - Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(0)), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime()); + Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(0)), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); engine.index(firstIndexRequest); assertThat(firstIndexRequest.version(), equalTo(2L)); engine.flush(); // flush - buffered deletes are not counted @@ -2200,4 +2201,249 @@ public class InternalEngineTests extends ESTestCase { assertEquals(0, docStats.getDeleted()); assertEquals(numDocs, docStats.getCount()); } + + public void testDoubleDelivery() throws IOException { + final ParsedDocument doc = testParsedDocument("1", "1", "test", 
null, 100, -1, testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null); + Engine.Index operation = randomAppendOnly(1, doc, false); + Engine.Index retry = randomAppendOnly(1, doc, true); + if (randomBoolean()) { + engine.index(operation); + assertFalse(engine.indexWriterHasDeletions()); + assertEquals(0, engine.getNumVersionLookups()); + assertNotNull(operation.getTranslogLocation()); + engine.index(retry); + assertTrue(engine.indexWriterHasDeletions()); + assertEquals(0, engine.getNumVersionLookups()); + assertNotNull(retry.getTranslogLocation()); + assertTrue(retry.getTranslogLocation().compareTo(operation.getTranslogLocation()) > 0); + } else { + engine.index(retry); + assertTrue(engine.indexWriterHasDeletions()); + assertEquals(0, engine.getNumVersionLookups()); + assertNotNull(retry.getTranslogLocation()); + engine.index(operation); + assertTrue(engine.indexWriterHasDeletions()); + assertEquals(0, engine.getNumVersionLookups()); + assertNotNull(retry.getTranslogLocation()); + assertTrue(retry.getTranslogLocation().compareTo(operation.getTranslogLocation()) < 0); + } + + engine.refresh("test"); + try (Engine.Searcher searcher = engine.acquireSearcher("test")) { + TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); + assertEquals(1, topDocs.totalHits); + } + operation = randomAppendOnly(1, doc, false); + retry = randomAppendOnly(1, doc, true); + if (randomBoolean()) { + engine.index(operation); + assertNotNull(operation.getTranslogLocation()); + engine.index(retry); + assertNotNull(retry.getTranslogLocation()); + assertTrue(retry.getTranslogLocation().compareTo(operation.getTranslogLocation()) > 0); + } else { + engine.index(retry); + assertNotNull(retry.getTranslogLocation()); + engine.index(operation); + assertNotNull(retry.getTranslogLocation()); + assertTrue(retry.getTranslogLocation().compareTo(operation.getTranslogLocation()) < 0); + } + + engine.refresh("test"); + try (Engine.Searcher searcher = engine.acquireSearcher("test")) { + TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); + assertEquals(1, topDocs.totalHits); + } + } + + + public void testRetryWithAutogeneratedIdWorksAndNoDuplicateDocs() throws IOException { + + final ParsedDocument doc = testParsedDocument("1", "1", "test", null, 100, -1, testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null); + boolean isRetry = false; + long autoGeneratedIdTimestamp = 0; + + Engine.Index index = new Engine.Index(newUid("1"), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); + engine.index(index); + assertThat(index.version(), equalTo(1L)); + + index = new Engine.Index(newUid("1"), doc, index.version(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); + replicaEngine.index(index); + assertThat(index.version(), equalTo(1L)); + + isRetry = true; + index = new Engine.Index(newUid("1"), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); + engine.index(index); + assertThat(index.version(), equalTo(1L)); + engine.refresh("test"); + try (Engine.Searcher searcher = engine.acquireSearcher("test")) { + TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); + assertEquals(1, topDocs.totalHits); + } + + index = new Engine.Index(newUid("1"), doc, index.version(), 
index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); + replicaEngine.index(index); + replicaEngine.refresh("test"); + try (Engine.Searcher searcher = replicaEngine.acquireSearcher("test")) { + TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); + assertEquals(1, topDocs.totalHits); + } + } + + public void testRetryWithAutogeneratedIdsAndWrongOrderWorksAndNoDuplicateDocs() throws IOException { + + final ParsedDocument doc = testParsedDocument("1", "1", "test", null, 100, -1, testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null); + boolean isRetry = true; + long autoGeneratedIdTimestamp = 0; + + + Engine.Index firstIndexRequest = new Engine.Index(newUid("1"), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); + engine.index(firstIndexRequest); + assertThat(firstIndexRequest.version(), equalTo(1L)); + + Engine.Index firstIndexRequestReplica = new Engine.Index(newUid("1"), doc, firstIndexRequest.version(), firstIndexRequest.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); + replicaEngine.index(firstIndexRequestReplica); + assertThat(firstIndexRequestReplica.version(), equalTo(1L)); + + isRetry = false; + Engine.Index secondIndexRequest = new Engine.Index(newUid("1"), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); + engine.index(secondIndexRequest); + assertTrue(secondIndexRequest.isCreated()); + engine.refresh("test"); + try (Engine.Searcher searcher = engine.acquireSearcher("test")) { + TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); + assertEquals(1, topDocs.totalHits); + } + + Engine.Index secondIndexRequestReplica = new Engine.Index(newUid("1"), doc, firstIndexRequest.version(), firstIndexRequest.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); + replicaEngine.index(secondIndexRequestReplica); + replicaEngine.refresh("test"); + try (Engine.Searcher searcher = replicaEngine.acquireSearcher("test")) { + TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); + assertEquals(1, topDocs.totalHits); + } + } + + public Engine.Index randomAppendOnly(int docId, ParsedDocument doc, boolean retry) { + if (randomBoolean()) { + return new Engine.Index(newUid(Integer.toString(docId)), doc, Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), docId, retry); + } + return new Engine.Index(newUid(Integer.toString(docId)), doc, 1, VersionType.EXTERNAL, Engine.Operation.Origin.REPLICA, System.nanoTime(), docId, retry); + } + + public void testRetryConcurrently() throws InterruptedException, IOException { + Thread[] thread = new Thread[randomIntBetween(3, 5)]; + int numDocs = randomIntBetween(1000, 10000); + List docs = new ArrayList<>(); + for (int i = 0; i < numDocs; i++) { + final ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, i, -1, testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null); + Engine.Index originalIndex = randomAppendOnly(i, doc, false); + Engine.Index retryIndex = randomAppendOnly(i, doc, true); + docs.add(originalIndex); + docs.add(retryIndex); + } + Collections.shuffle(docs, random()); + CountDownLatch startGun = new 
CountDownLatch(thread.length); + AtomicInteger offset = new AtomicInteger(-1); + for (int i = 0; i < thread.length; i++) { + thread[i] = new Thread() { + @Override + public void run() { + startGun.countDown(); + try { + startGun.await(); + } catch (InterruptedException e) { + throw new AssertionError(e); + } + int docOffset; + while ((docOffset = offset.incrementAndGet()) < docs.size()) { + engine.index(docs.get(docOffset)); + } + } + }; + thread[i].start(); + } + for (int i = 0; i < thread.length; i++) { + thread[i].join(); + } + assertEquals(0, engine.getNumVersionLookups()); + assertEquals(0, engine.getNumIndexVersionsLookups()); + engine.refresh("test"); + try (Engine.Searcher searcher = engine.acquireSearcher("test")) { + TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); + assertEquals(numDocs, topDocs.totalHits); + } + assertTrue(engine.indexWriterHasDeletions()); + } + + public void testEngineMaxTimestampIsInitialized() throws IOException { + try (Store store = createStore(); + Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE, + IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP))) { + assertEquals(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp()); + + } + + long maxTimestamp = Math.abs(randomLong()); + try (Store store = createStore(); + Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE, + maxTimestamp))) { + assertEquals(maxTimestamp, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp()); + } + } + + public void testAppendConcurrently() throws InterruptedException, IOException { + Thread[] thread = new Thread[randomIntBetween(3, 5)]; + int numDocs = randomIntBetween(1000, 10000); + assertEquals(0, engine.getNumVersionLookups()); + assertEquals(0, engine.getNumIndexVersionsLookups()); + List docs = new ArrayList<>(); + for (int i = 0; i < numDocs; i++) { + final ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, i, -1, testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null); + Engine.Index index = randomAppendOnly(i, doc, false); + docs.add(index); + } + Collections.shuffle(docs, random()); + CountDownLatch startGun = new CountDownLatch(thread.length); + AtomicInteger offset = new AtomicInteger(-1); + for (int i = 0; i < thread.length; i++) { + thread[i] = new Thread() { + @Override + public void run() { + startGun.countDown(); + try { + startGun.await(); + } catch (InterruptedException e) { + throw new AssertionError(e); + } + int docOffset; + while ((docOffset = offset.incrementAndGet()) < docs.size()) { + engine.index(docs.get(docOffset)); + } + } + }; + thread[i].start(); + } + for (int i = 0; i < thread.length; i++) { + thread[i].join(); + } + + engine.refresh("test"); + try (Engine.Searcher searcher = engine.acquireSearcher("test")) { + TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); + assertEquals(docs.size(), topDocs.totalHits); + } + assertEquals(0, engine.getNumVersionLookups()); + assertEquals(0, engine.getNumIndexVersionsLookups()); + assertFalse(engine.indexWriterHasDeletions()); + + } + + public static long getNumVersionLookups(InternalEngine engine) { // for other tests to access this + return engine.getNumVersionLookups(); + } + + public static long getNumIndexVersionsLookups(InternalEngine engine) { // for other tests to access this + return 
engine.getNumIndexVersionsLookups(); + } } diff --git a/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java index 9d26c9593af..6dea774f258 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java @@ -38,6 +38,7 @@ import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesArray; @@ -88,9 +89,6 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -/** - * TODO: document me! - */ public class ShadowEngineTests extends ESTestCase { protected final ShardId shardId = new ShardId("index", "_na_", 1); @@ -250,7 +248,7 @@ public class ShadowEngineTests extends ESTestCase { EngineConfig config = new EngineConfig(openMode, shardId, threadPool, indexSettings, null, store, createSnapshotDeletionPolicy(), mergePolicy, iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), eventListener, null, IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, - TimeValue.timeValueMinutes(5), refreshListeners); + TimeValue.timeValueMinutes(5), refreshListeners, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP); return config; } @@ -989,7 +987,7 @@ public class ShadowEngineTests extends ESTestCase { final int numDocs = randomIntBetween(2, 10); // at least 2 documents otherwise we don't see any deletes below for (int i = 0; i < numDocs; i++) { ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null); - Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime()); + Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); primaryEngine.index(firstIndexRequest); assertThat(firstIndexRequest.version(), equalTo(1L)); } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingDisabledTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingDisabledTests.java index 0cd6f93ba3a..111f4b470d4 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingDisabledTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingDisabledTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.indices.IndicesService; @@ -81,7 +82,8 @@ public class DynamicMappingDisabledTests extends ESSingleNodeTestCase { shardStateAction = new ShardStateAction(settings, clusterService, transportService, null, null, THREAD_POOL); 
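For orientation on the noisiest change in the engine tests above: Engine.Index grew a trailing pair of arguments, the auto-generated-id timestamp and an isRetry flag, which is why dozens of call sites now end in -1, false. A condensed sketch of the two flavors (not a standalone class; doc and newUid are the surrounding test helpers, and -1 is IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP):

    // Ordinary indexing: sentinel timestamp -1 and isRetry = false.
    Engine.Index plain = new Engine.Index(newUid("1"), doc, Versions.MATCH_ANY, VersionType.INTERNAL,
            Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false);

    // Redelivered append-only write: a real timestamp plus isRetry = true, letting the
    // engine deduplicate the retry without falling back to per-document version lookups.
    Engine.Index retried = new Engine.Index(newUid("1"), doc, Versions.MATCH_ANY, VersionType.INTERNAL,
            Engine.Operation.Origin.PRIMARY, System.nanoTime(), /* autoGeneratedIdTimestamp */ 0, /* isRetry */ true);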
actionFilters = new ActionFilters(Collections.emptySet()); indexNameExpressionResolver = new IndexNameExpressionResolver(settings); - autoCreateIndex = new AutoCreateIndex(settings, indexNameExpressionResolver); + autoCreateIndex = new AutoCreateIndex(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + indexNameExpressionResolver); } @After diff --git a/core/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldTypeTests.java index b3cbaa291b4..59571b70231 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldTypeTests.java @@ -117,8 +117,8 @@ public class ScaledFloatFieldTypeTests extends FieldTypeTestCase { IndexSearcher searcher = newSearcher(reader); final int numQueries = 1000; for (int i = 0; i < numQueries; ++i) { - double l = (randomDouble() * 2 - 1) * 10000; - double u = (randomDouble() * 2 - 1) * 10000; + Double l = randomBoolean() ? null : (randomDouble() * 2 - 1) * 10000; + Double u = randomBoolean() ? null : (randomDouble() * 2 - 1) * 10000; boolean includeLower = randomBoolean(); boolean includeUpper = randomBoolean(); Query doubleQ = NumberFieldMapper.NumberType.DOUBLE.rangeQuery("double", l, u, includeLower, includeUpper); diff --git a/core/src/test/java/org/elasticsearch/index/query/QueryRewriteContextTests.java b/core/src/test/java/org/elasticsearch/index/query/QueryRewriteContextTests.java new file mode 100644 index 00000000000..a39fbae1764 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/query/QueryRewriteContextTests.java @@ -0,0 +1,63 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.query; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.indices.query.IndicesQueriesRegistry; +import org.elasticsearch.script.ScriptSettings; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.test.ESTestCase; + +import static java.util.Collections.emptyList; + +public class QueryRewriteContextTests extends ESTestCase { + + public void testNewParseContextWithLegacyScriptLanguage() throws Exception { + String defaultLegacyScriptLanguage = randomAsciiOfLength(4); + IndexMetaData.Builder indexMetadata = new IndexMetaData.Builder("index"); + indexMetadata.settings(Settings.builder().put("index.version.created", Version.CURRENT) + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 1) + ); + IndicesQueriesRegistry indicesQueriesRegistry = new SearchModule(Settings.EMPTY, false, emptyList()).getQueryParserRegistry(); + IndexSettings indexSettings = new IndexSettings(indexMetadata.build(), + Settings.builder().put(ScriptSettings.LEGACY_SCRIPT_SETTING, defaultLegacyScriptLanguage).build()); + QueryRewriteContext queryRewriteContext = + new QueryRewriteContext(indexSettings, null, null, indicesQueriesRegistry, null, null, null); + + // verify that the default script language in the query parse context equals the defaultLegacyScriptLanguage variable: + QueryParseContext queryParseContext = + queryRewriteContext.newParseContextWithLegacyScriptLanguage(XContentHelper.createParser(new BytesArray("{}"))); + assertEquals(defaultLegacyScriptLanguage, queryParseContext.getDefaultScriptLanguage()); + + // verify that the script query's script language equals the defaultLegacyScriptLanguage variable: + XContentParser parser = XContentHelper.createParser(new BytesArray("{\"script\" : {\"script\": \"return true\"}}")); + queryParseContext = queryRewriteContext.newParseContextWithLegacyScriptLanguage(parser); + ScriptQueryBuilder queryBuilder = (ScriptQueryBuilder) queryParseContext.parseInnerQueryBuilder().get(); + assertEquals(defaultLegacyScriptLanguage, queryBuilder.script().getLang()); + } + +} diff --git a/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java index be77ba00734..ce49f18ccfc 100644 --- a/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java @@ -26,15 +26,16 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.DisjunctionMaxQuery; +import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.PhraseQuery; +import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.RegexpQuery; -import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.SynonymQuery; -import org.apache.lucene.search.PrefixQuery; -import
org.apache.lucene.search.MultiTermQuery; +import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.automaton.TooComplexToDeterminizeException; import org.elasticsearch.common.lucene.all.AllTermQuery; import org.elasticsearch.common.unit.Fuzziness; @@ -390,6 +391,32 @@ public class QueryStringQueryBuilderTests extends AbstractQueryTestCase<QueryStringQueryBuilder> { + public void testFuzzinessAuto() throws Exception { + assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); + + int length = randomIntBetween(1, 10); + StringBuilder queryString = new StringBuilder(); + for (int i = 0; i < length; i++) { + queryString.append("a"); + } + queryString.append("~"); + + int expectedEdits; + if (length <= 2) { + expectedEdits = 0; + } else if (3 <= length && length <= 5) { + expectedEdits = 1; + } else { + expectedEdits = 2; + } + + Query query = queryStringQuery(queryString.toString()).defaultField(STRING_FIELD_NAME).fuzziness(Fuzziness.AUTO) + .toQuery(createShardContext()); + assertThat(query, instanceOf(FuzzyQuery.class)); + FuzzyQuery fuzzyQuery = (FuzzyQuery) query; + assertEquals(expectedEdits, fuzzyQuery.getMaxEdits()); + } + public void testFuzzyNumeric() throws Exception { assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); QueryStringQueryBuilder query = queryStringQuery("12~0.2").defaultField(INT_FIELD_NAME); diff --git a/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreEquivalenceTests.java b/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreEquivalenceTests.java index 8c069a67a7a..1c220f172ae 100644 --- a/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreEquivalenceTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreEquivalenceTests.java @@ -24,6 +24,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.RandomApproximationQuery; import org.apache.lucene.search.SearchEquivalenceTestBase; import org.apache.lucene.search.TermQuery; +import org.elasticsearch.bootstrap.BootstrapForTesting; import org.elasticsearch.common.lucene.search.function.CombineFunction; import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery; import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery.FilterFunction; @@ -31,6 +32,14 @@ import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery; public class FunctionScoreEquivalenceTests extends SearchEquivalenceTestBase { + static { + try { + Class.forName("org.elasticsearch.test.ESTestCase"); + } catch (ClassNotFoundException e) { + throw new AssertionError(e); + } + BootstrapForTesting.ensureInitialized(); + } public void testMinScoreAllIncluded() throws Exception { Term term = randomTerm(); diff --git a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index 3b1284e671c..ec794091a42 100644 --- a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -110,7 +110,7 @@ import static org.hamcrest.Matchers.equalTo; public abstract class ESIndexLevelReplicationTestCase extends ESTestCase { protected ThreadPool threadPool; - private final Index index = new Index("test", "uuid"); + protected final Index index = new Index("test", "uuid"); private final ShardId shardId = new ShardId(index, 0);
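The new query-string test above pins down the Fuzziness.AUTO contract: terms of up to two characters must match exactly, lengths three through five allow one edit, and anything longer allows two. A standalone restatement of that mapping (plain Java, independent of the test harness):

    final class AutoFuzziness {
        // Mirrors the expectedEdits computation in the test above.
        static int maxEdits(int termLength) {
            if (termLength <= 2) {
                return 0; // too short to tolerate typos
            } else if (termLength <= 5) {
                return 1;
            }
            return 2; // Lucene's FuzzyQuery caps edit distance at 2
        }

        public static void main(String[] args) {
            for (int len : new int[] {1, 4, 8}) {
                System.out.println(len + " -> " + maxEdits(len) + " edits");
            }
        }
    }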
     private final Map<String, String> indexMapping = Collections.singletonMap("type", "{ \"type\": {} }");
 
     protected static final PeerRecoveryTargetService.RecoveryListener recoveryListener = new PeerRecoveryTargetService.RecoveryListener() {
@@ -262,6 +262,15 @@ public abstract class ESIndexLevelReplicationTestCase extends ESTestCase {
             return numOfDoc;
         }
 
+        public int appendDocs(final int numOfDoc) throws Exception {
+            for (int doc = 0; doc < numOfDoc; doc++) {
+                final IndexRequest indexRequest = new IndexRequest(index.getName(), "type").source("{}");
+                final IndexResponse response = index(indexRequest);
+                assertEquals(DocWriteResponse.Result.CREATED, response.getResult());
+            }
+            return numOfDoc;
+        }
+
         public IndexResponse index(IndexRequest indexRequest) throws Exception {
             PlainActionFuture<IndexResponse> listener = new PlainActionFuture<>();
             IndexingOp op = new IndexingOp(indexRequest, listener, this);
@@ -275,8 +284,7 @@ public abstract class ESIndexLevelReplicationTestCase extends ESTestCase {
             primary.recoverFromStore();
             primary.updateRoutingEntry(ShardRoutingHelper.moveToStarted(primary.routingEntry()));
             for (IndexShard replicaShard : replicas) {
-                recoverReplica(replicaShard,
-                    (replica, sourceNode) -> new RecoveryTarget(replica, sourceNode, recoveryListener, version -> {}));
+                recoverReplica(replicaShard);
             }
         }
@@ -285,6 +293,11 @@ public abstract class ESIndexLevelReplicationTestCase extends ESTestCase {
             replicas.add(replica);
             return replica;
         }
+
+        public void recoverReplica(IndexShard replica) throws IOException {
+            recoverReplica(replica, (r, sourceNode) -> new RecoveryTarget(r, sourceNode, recoveryListener, version -> {}));
+        }
+
         public void recoverReplica(IndexShard replica, BiFunction<IndexShard, DiscoveryNode, RecoveryTarget> targetSupplier) throws IOException {
             recoverReplica(replica, targetSupplier, true);
diff --git a/core/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java b/core/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java
index c6d7878406a..407b374a92e 100644
--- a/core/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java
+++ b/core/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java
@@ -18,6 +18,22 @@
  */
 package org.elasticsearch.index.replication;
 
+import org.elasticsearch.action.DocWriteResponse;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.engine.InternalEngine;
+import org.elasticsearch.index.engine.InternalEngineTests;
+import org.elasticsearch.index.engine.SegmentsStats;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.IndexShardTests;
+import org.elasticsearch.index.store.Store;
+import org.elasticsearch.indices.recovery.RecoveryTarget;
+
+import java.io.IOException;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Future;
+
 public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase {
 
     public void testSimpleReplication() throws Exception {
@@ -28,4 +44,81 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase
             shards.assertAllEqual(docCount);
         }
     }
+
+    public void testSimpleAppendOnlyReplication() throws Exception {
+        try (ReplicationGroup shards = createGroup(randomInt(2))) {
+            shards.startAll();
+            final int docCount = randomInt(50);
+            shards.appendDocs(docCount);
+            shards.assertAllEqual(docCount);
+        }
+    }
+
+    public void testAppendWhileRecovering() throws Exception {
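+        // A replica is added and recovered while a background thread keeps appending documents,
+        // so writes reach the replica both via the recovery (file copy plus translog replay) and
+        // via normal replication; the latch below forces the two paths to overlap.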
+        try (ReplicationGroup shards = createGroup(0)) {
+            shards.startAll();
+            IndexShard replica = shards.addReplica();
+            CountDownLatch latch = new CountDownLatch(2);
+            int numDocs = randomIntBetween(100, 200);
+            shards.appendDocs(1); // just append one to the translog so we can assert below
+            Thread thread = new Thread() {
+                @Override
+                public void run() {
+                    try {
+                        latch.countDown();
+                        latch.await();
+                        shards.appendDocs(numDocs - 1);
+                    } catch (Exception e) {
+                        throw new AssertionError(e);
+                    }
+                }
+            };
+            thread.start();
+            Future future = shards.asyncRecoverReplica(replica, (indexShard, node)
+                -> new RecoveryTarget(indexShard, node, recoveryListener, version -> {}) {
+                @Override
+                public void cleanFiles(int totalTranslogOps, Store.MetadataSnapshot sourceMetaData) throws IOException {
+                    super.cleanFiles(totalTranslogOps, sourceMetaData);
+                    latch.countDown();
+                    try {
+                        latch.await();
+                    } catch (InterruptedException e) {
+                        throw new AssertionError(e);
+                    }
+                }
+            });
+            future.get();
+            thread.join();
+            shards.assertAllEqual(numDocs);
+            Engine engine = IndexShardTests.getEngineFromShard(replica);
+            assertEquals("expected no version lookups", 0, InternalEngineTests.getNumVersionLookups((InternalEngine) engine));
+            for (IndexShard shard : shards) {
+                engine = IndexShardTests.getEngineFromShard(shard);
+                assertEquals(0, InternalEngineTests.getNumIndexVersionsLookups((InternalEngine) engine));
+                assertEquals(0, InternalEngineTests.getNumVersionLookups((InternalEngine) engine));
+            }
+        }
+    }
+
+    public void testInheritMaxValidAutoIDTimestampOnRecovery() throws Exception {
+        try (ReplicationGroup shards = createGroup(0)) {
+            shards.startAll();
+            final IndexRequest indexRequest = new IndexRequest(index.getName(), "type").source("{}");
+            indexRequest.onRetry(); // force an update of the timestamp
+            final IndexResponse response = shards.index(indexRequest);
+            assertEquals(DocWriteResponse.Result.CREATED, response.getResult());
+            if (randomBoolean()) { // let's check if that also happens if no translog record is replicated
+                shards.flush();
+            }
+            IndexShard replica = shards.addReplica();
+            shards.recoverReplica(replica);
+
+            SegmentsStats segmentsStats = replica.segmentStats(false);
+            SegmentsStats primarySegmentStats = shards.getPrimary().segmentStats(false);
+            assertNotEquals(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, primarySegmentStats.getMaxUnsafeAutoIdTimestamp());
+            assertEquals(primarySegmentStats.getMaxUnsafeAutoIdTimestamp(), segmentsStats.getMaxUnsafeAutoIdTimestamp());
+            assertNotEquals(Long.MAX_VALUE, segmentsStats.getMaxUnsafeAutoIdTimestamp());
+        }
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java
index a690d2ae7a8..371764acc97 100644
--- a/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java
+++ b/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java
@@ -18,14 +18,14 @@
  */
 package org.elasticsearch.index.replication;
 
+import org.apache.logging.log4j.Logger;
 import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.store.Store;
 import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
 import org.elasticsearch.indices.recovery.RecoveryState;
 import 
org.elasticsearch.indices.recovery.RecoveryTarget; -import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; import java.io.IOException; import java.util.EnumSet; @@ -62,10 +62,10 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC private final RecoveryState.Stage stageToBlock; public static final EnumSet SUPPORTED_STAGES = EnumSet.of(RecoveryState.Stage.INDEX, RecoveryState.Stage.TRANSLOG, RecoveryState.Stage.FINALIZE); - private final ESLogger logger; + private final Logger logger; BlockingTarget(RecoveryState.Stage stageToBlock, CountDownLatch recoveryBlocked, CountDownLatch releaseRecovery, IndexShard shard, - DiscoveryNode sourceNode, PeerRecoveryTargetService.RecoveryListener listener, ESLogger logger) { + DiscoveryNode sourceNode, PeerRecoveryTargetService.RecoveryListener listener, Logger logger) { super(shard, sourceNode, listener, version -> {}); this.recoveryBlocked = recoveryBlocked; this.releaseRecovery = releaseRecovery; diff --git a/core/src/test/java/org/elasticsearch/index/shard/CommitPointsTests.java b/core/src/test/java/org/elasticsearch/index/shard/CommitPointsTests.java index 8821f0b9e77..c723538c837 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/CommitPointsTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/CommitPointsTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.shard; -import org.elasticsearch.common.logging.ESLogger; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.test.ESTestCase; @@ -33,7 +33,7 @@ import static org.hamcrest.Matchers.nullValue; * */ public class CommitPointsTests extends ESTestCase { - private final ESLogger logger = Loggers.getLogger(CommitPointsTests.class); + private final Logger logger = Loggers.getLogger(CommitPointsTests.class); public void testCommitPointXContent() throws Exception { ArrayList indexFiles = new ArrayList<>(); diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 652cb6e778a..b4725a8506d 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.index.shard; +import org.apache.logging.log4j.Logger; import org.apache.lucene.document.Field; import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.index.CorruptIndexException; @@ -73,7 +74,6 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.unit.ByteSizeUnit; @@ -473,7 +473,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { assertNotNull(indexStats.getShards()[0].getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); } - public static ShardStateMetaData load(ESLogger logger, Path... shardPaths) throws IOException { + public static ShardStateMetaData load(Logger logger, Path... 
shardPaths) throws IOException { return ShardStateMetaData.FORMAT.loadLatestState(logger, shardPaths); } @@ -1611,7 +1611,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { operations.add(new Translog.Index("testtype", "1", BytesReference.toBytes(jsonBuilder().startObject().field("foo", "bar").endObject().bytes()))); newShard.prepareForIndexRecovery(); newShard.recoveryState().getTranslog().totalOperations(operations.size()); - newShard.skipTranslogRecovery(); + newShard.skipTranslogRecovery(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP); newShard.performBatchRecovery(operations); assertFalse(newShard.getTranslog().syncNeeded()); } @@ -1668,7 +1668,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { List operations = new ArrayList<>(); operations.add(new Translog.Index("testtype", "1", BytesReference.toBytes(jsonBuilder().startObject().field("foo", "bar").endObject().bytes()))); newShard.prepareForIndexRecovery(); - newShard.skipTranslogRecovery(); + newShard.skipTranslogRecovery(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP); // Shard is still inactive since we haven't started recovering yet assertFalse(newShard.isActive()); newShard.performBatchRecovery(operations); @@ -1820,4 +1820,8 @@ public class IndexShardTests extends ESSingleNodeTestCase { @Override public void verify(String verificationToken, DiscoveryNode localNode) {} } + + public static Engine getEngineFromShard(IndexShard shard) { + return shard.getEngineOrNull(); + } } diff --git a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index 20fd02b5163..f3f15b2639c 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -29,6 +29,7 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.store.Directory; import org.apache.lucene.util.IOUtils; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -36,7 +37,6 @@ import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.codec.CodecService; @@ -66,7 +66,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Locale; -import java.util.concurrent.ScheduledFuture; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; @@ -125,7 +124,7 @@ public class RefreshListenersTests extends ESTestCase { store, new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()), newMergePolicy(), iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), eventListener, new TranslogHandler(shardId.getIndexName(), logger), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, - TimeValue.timeValueMinutes(5), listeners); + TimeValue.timeValueMinutes(5), listeners, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP); engine = new InternalEngine(config); } diff --git 
a/core/src/test/java/org/elasticsearch/index/store/ExceptionRetryIT.java b/core/src/test/java/org/elasticsearch/index/store/ExceptionRetryIT.java
index b43595b4b93..aa9de8de871 100644
--- a/core/src/test/java/org/elasticsearch/index/store/ExceptionRetryIT.java
+++ b/core/src/test/java/org/elasticsearch/index/store/ExceptionRetryIT.java
@@ -20,13 +20,20 @@ package org.elasticsearch.index.store;
 
 import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
 import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.admin.indices.stats.IndexShardStats;
+import org.elasticsearch.action.admin.indices.stats.IndexStats;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.admin.indices.stats.ShardStats;
 import org.elasticsearch.action.bulk.BulkItemResponse;
 import org.elasticsearch.action.bulk.BulkRequestBuilder;
 import org.elasticsearch.action.bulk.BulkResponse;
 import org.elasticsearch.action.bulk.TransportShardBulkAction;
 import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
 import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.engine.SegmentsStats;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.search.SearchHit;
 import org.elasticsearch.test.ESIntegTestCase;
@@ -44,6 +51,7 @@ import java.util.HashSet;
 import java.util.Set;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.stream.Collectors;
 
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.elasticsearch.index.query.QueryBuilders.termQuery;
@@ -53,7 +61,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSear
 import static org.hamcrest.CoreMatchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 
-@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE)
+@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, numDataNodes = 2,
+    supportsDedicatedMasters = false, numClientNodes = 1, transportClientRatio = 0.0)
 public class ExceptionRetryIT extends ESIntegTestCase {
 
     @Override
@@ -68,40 +77,46 @@ public class ExceptionRetryIT extends ESIntegTestCase {
     }
 
     /**
-     * Tests retry mechanism when indexing. If an exception occurs when indexing then the indexing request is tried again before finally failing.
-     * If auto generated ids are used this must not lead to duplicate ids
+     * Tests the retry mechanism when indexing: if an exception occurs while indexing, the indexing request is retried again before
+     * it finally fails. If auto-generated ids are used, this retry must not lead to duplicate ids.
      * see https://github.com/elastic/elasticsearch/issues/8788
      */
     public void testRetryDueToExceptionOnNetworkLayer() throws ExecutionException, InterruptedException, IOException {
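+        // One bulk request to a randomly chosen "unlucky" data node is failed with a
+        // ConnectTransportException after it has already been handed to the transport,
+        // which triggers the retry path that must not produce duplicate auto-generated ids.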
         final AtomicBoolean exceptionThrown = new AtomicBoolean(false);
         int numDocs = scaledRandomIntBetween(100, 1000);
+        Client client = internalCluster().coordOnlyNodeClient();
         NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().get();
-        NodeStats unluckyNode = randomFrom(nodeStats.getNodes());
-        assertAcked(client().admin().indices().prepareCreate("index"));
+        NodeStats unluckyNode = randomFrom(nodeStats.getNodes().stream().filter((s) -> s.getNode().isDataNode())
+            .collect(Collectors.toList()));
+        assertAcked(client().admin().indices().prepareCreate("index").setSettings(Settings.builder()
+            .put("index.number_of_replicas", 1)
+            .put("index.number_of_shards", 5)));
         ensureGreen("index");
-
+        logger.info("unlucky node: {}", unluckyNode.getNode());
         //create a transport service that throws a ConnectTransportException for one bulk request and therefore triggers a retry.
         for (NodeStats dataNode : nodeStats.getNodes()) {
-            MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().getName()));
-            mockTransportService.addDelegate(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()), new MockTransportService.DelegateTransport(mockTransportService.original()) {
+            MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(TransportService.class,
+                dataNode.getNode().getName()));
+            mockTransportService.addDelegate(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()),
+                new MockTransportService.DelegateTransport(mockTransportService.original()) {
                 @Override
-                public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
+                public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request,
+                        TransportRequestOptions options) throws IOException, TransportException {
                     super.sendRequest(node, requestId, action, request, options);
-                    if (action.equals(TransportShardBulkAction.ACTION_NAME) && !exceptionThrown.get()) {
+                    if (action.equals(TransportShardBulkAction.ACTION_NAME) && exceptionThrown.compareAndSet(false, true)) {
                         logger.debug("Throw ConnectTransportException");
-                        exceptionThrown.set(true);
                         throw new ConnectTransportException(node, action);
                     }
                 }
             });
         }
 
-        BulkRequestBuilder bulkBuilder = client().prepareBulk();
+        BulkRequestBuilder bulkBuilder = client.prepareBulk();
         for (int i = 0; i < numDocs; i++) {
             XContentBuilder doc = null;
             doc = jsonBuilder().startObject().field("foo", "bar").endObject();
-            bulkBuilder.add(client().prepareIndex("index", "type").setSource(doc));
+            bulkBuilder.add(client.prepareIndex("index", "type").setSource(doc));
         }
 
         BulkResponse response = bulkBuilder.get();
@@ -122,7 +137,8 @@ public class ExceptionRetryIT extends ESIntegTestCase {
         for (int i = 0; i < searchResponse.getHits().getHits().length; i++) {
             if (!uniqueIds.add(searchResponse.getHits().getHits()[i].getId())) {
                 if (!found_duplicate_already) {
-                    SearchResponse dupIdResponse = client().prepareSearch("index").setQuery(termQuery("_id", searchResponse.getHits().getHits()[i].getId())).setExplain(true).get();
+                    SearchResponse dupIdResponse = client().prepareSearch("index").setQuery(termQuery("_id",
+                        searchResponse.getHits().getHits()[i].getId())).setExplain(true).get();
                     assertThat(dupIdResponse.getHits().totalHits(), greaterThan(1L));
                     logger.info("found a duplicate id:");
                     for (SearchHit hit : dupIdResponse.getHits()) {
@@ -137,5 +153,16 @@ public class ExceptionRetryIT extends ESIntegTestCase {
         assertSearchResponse(searchResponse);
         assertThat(dupCounter, equalTo(0L));
         assertHitCount(searchResponse, numDocs);
+        IndicesStatsResponse index = client().admin().indices().prepareStats("index").clear().setSegments(true).get();
+        IndexStats indexStats = index.getIndex("index");
+        long maxUnsafeAutoIdTimestamp = Long.MIN_VALUE;
+        for (IndexShardStats indexShardStats : indexStats) {
+            for (ShardStats shardStats : indexShardStats) {
+                SegmentsStats segments = shardStats.getStats().getSegments();
+                maxUnsafeAutoIdTimestamp = Math.max(maxUnsafeAutoIdTimestamp, segments.getMaxUnsafeAutoIdTimestamp());
+            }
+        }
+        assertTrue("exception must have been thrown otherwise setup is broken", exceptionThrown.get());
+        assertTrue("maxUnsafeAutoIdTimestamp must be >= 0, since at least one retry happened", maxUnsafeAutoIdTimestamp > -1);
     }
 }
diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java
index 08b4d8ac71e..9961637c323 100644
--- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java
+++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java
@@ -20,6 +20,8 @@ package org.elasticsearch.index.translog;
 
 import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.mockfile.FilterFileChannel;
@@ -342,7 +344,7 @@ public class TranslogTests extends ESTestCase {
         assertThat(stats.estimatedNumberOfOperations(), equalTo(0L));
         assertThat(stats.getTranslogSizeInBytes(), equalTo(firstOperationPosition));
         assertEquals(6, total.estimatedNumberOfOperations());
-        assertEquals(431, total.getTranslogSizeInBytes());
+        assertEquals(455, total.getTranslogSizeInBytes());
 
         BytesStreamOutput out = new BytesStreamOutput();
         total.writeTo(out);
@@ -350,14 +352,13 @@
         copy.readFrom(out.bytes().streamInput());
 
         assertEquals(6, copy.estimatedNumberOfOperations());
-        assertEquals(431, copy.getTranslogSizeInBytes());
+        assertEquals(455, copy.getTranslogSizeInBytes());
 
         try (XContentBuilder builder = XContentFactory.jsonBuilder()) {
             builder.startObject();
             copy.toXContent(builder, ToXContent.EMPTY_PARAMS);
             builder.endObject();
-
-            assertEquals("{\"translog\":{\"operations\":6,\"size_in_bytes\":431}}", builder.string());
+            assertEquals("{\"translog\":{\"operations\":6,\"size_in_bytes\":455}}", builder.string());
         }
 
         try {
@@ -667,7 +668,7 @@
 
             @Override
             public void onFailure(Exception e) {
-                logger.error("--> writer [{}] had an error", e, threadName);
+                logger.error((Supplier<?>) () -> new ParameterizedMessage("--> writer [{}] had an error", threadName), e);
                 errors.add(e);
             }
         }, threadName);
@@ -682,7 +683,7 @@
 
             @Override
             public void onFailure(Exception e) {
-                logger.error("--> reader [{}] had an error", e, threadId);
+                logger.error((Supplier<?>) () -> new ParameterizedMessage("--> reader [{}] had an error", 
threadId), e); errors.add(e); try { closeView(); @@ -1164,7 +1165,7 @@ public class TranslogTests extends ESTestCase { try (Translog translog = new Translog(config, translogGeneration)) { fail("corrupted"); } catch (IllegalStateException ex) { - assertEquals(ex.getMessage(), "Checkpoint file translog-2.ckp already exists but has corrupted content expected: Checkpoint{offset=2683, numOps=55, translogFileGeneration= 2} but got: Checkpoint{offset=0, numOps=0, translogFileGeneration= 0}"); + assertEquals(ex.getMessage(), "Checkpoint file translog-2.ckp already exists but has corrupted content expected: Checkpoint{offset=3123, numOps=55, translogFileGeneration= 2} but got: Checkpoint{offset=0, numOps=0, translogFileGeneration= 0}"); } Checkpoint.write(FileChannel::open, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)), read, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING); try (Translog translog = new Translog(config, translogGeneration)) { diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java b/core/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java index 0fd4a4b45ec..16a926b9e7a 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java @@ -28,10 +28,13 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.elasticsearch.test.junit.annotations.TestLogging; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import static org.hamcrest.Matchers.equalTo; @@ -49,7 +52,7 @@ public class IndexPrimaryRelocationIT extends ESIntegTestCase { .addMapping("type", "field", "type=text") .get(); ensureGreen("test"); - + AtomicInteger numAutoGenDocs = new AtomicInteger(); final AtomicBoolean finished = new AtomicBoolean(false); Thread indexingThread = new Thread() { @Override @@ -59,6 +62,8 @@ public class IndexPrimaryRelocationIT extends ESIntegTestCase { assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); DeleteResponse deleteResponse = client().prepareDelete("test", "type", "id").get(); assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); + client().prepareIndex("test", "type").setSource("auto", true).get(); + numAutoGenDocs.incrementAndGet(); } } }; @@ -87,5 +92,9 @@ public class IndexPrimaryRelocationIT extends ESIntegTestCase { } finished.set(true); indexingThread.join(); + refresh("test"); + ElasticsearchAssertions.assertHitCount(client().prepareSearch("test").get(), numAutoGenDocs.get()); + ElasticsearchAssertions.assertHitCount(client().prepareSearch("test")// extra paranoia ;) + .setQuery(QueryBuilders.termQuery("auto", true)).get(), numAutoGenDocs.get()); } } diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index 4c565d15bbb..a28af0561be 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ 
b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -580,7 +580,7 @@ public class IndexRecoveryIT extends ESIntegTestCase { List requests = new ArrayList<>(); int numDocs = scaledRandomIntBetween(25, 250); for (int i = 0; i < numDocs; i++) { - requests.add(client().prepareIndex(indexName, "type").setCreate(true).setSource("{}")); + requests.add(client().prepareIndex(indexName, "type").setSource("{}")); } indexRandom(true, requests); ensureSearchable(indexName); diff --git a/core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java b/core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java index 4d82acf87f5..81d1e05e9e0 100644 --- a/core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java +++ b/core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java @@ -19,16 +19,20 @@ package org.elasticsearch.indices.settings; -import org.apache.log4j.AppenderSkeleton; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; -import org.apache.log4j.spi.LoggingEvent; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.LogEvent; +import org.apache.logging.log4j.core.appender.AbstractAppender; +import org.apache.logging.log4j.core.filter.RegexFilter; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Priority; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.TestLoggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexModule; @@ -348,13 +352,17 @@ public class UpdateSettingsIT extends ESIntegTestCase { logger.info("test: test done"); } - private static class MockAppender extends AppenderSkeleton { + private static class MockAppender extends AbstractAppender { public boolean sawUpdateMaxThreadCount; public boolean sawUpdateAutoThrottle; + public MockAppender(final String name) throws IllegalAccessException { + super(name, RegexFilter.createFilter(".*(\n.*)*", new String[0], true, null, null), null); + } + @Override - protected void append(LoggingEvent event) { - String message = event.getMessage().toString(); + public void append(LogEvent event) { + String message = event.getMessage().getFormattedMessage(); if (event.getLevel() == Level.TRACE && event.getLoggerName().endsWith("lucene.iw")) { } @@ -366,22 +374,14 @@ public class UpdateSettingsIT extends ESIntegTestCase { } } - @Override - public boolean requiresLayout() { - return false; - } - - @Override - public void close() { - } } - public void testUpdateAutoThrottleSettings() { - MockAppender mockAppender = new MockAppender(); - Logger rootLogger = Logger.getRootLogger(); + public void testUpdateAutoThrottleSettings() throws IllegalAccessException { + MockAppender mockAppender = new MockAppender("testUpdateAutoThrottleSettings"); + Logger rootLogger = LogManager.getRootLogger(); Level savedLevel = rootLogger.getLevel(); - rootLogger.addAppender(mockAppender); - rootLogger.setLevel(Level.TRACE); + TestLoggers.addAppender(rootLogger, mockAppender); + 
Loggers.setLevel(rootLogger, Level.TRACE); try { // No throttling at first, only 1 non-replicated shard, force lots of merging: @@ -412,18 +412,18 @@ public class UpdateSettingsIT extends ESIntegTestCase { GetSettingsResponse getSettingsResponse = client().admin().indices().prepareGetSettings("test").get(); assertThat(getSettingsResponse.getSetting("test", MergeSchedulerConfig.AUTO_THROTTLE_SETTING.getKey()), equalTo("false")); } finally { - rootLogger.removeAppender(mockAppender); - rootLogger.setLevel(savedLevel); + TestLoggers.removeAppender(rootLogger, mockAppender); + Loggers.setLevel(rootLogger, savedLevel); } } // #6882: make sure we can change index.merge.scheduler.max_thread_count live - public void testUpdateMergeMaxThreadCount() { - MockAppender mockAppender = new MockAppender(); - Logger rootLogger = Logger.getRootLogger(); + public void testUpdateMergeMaxThreadCount() throws IllegalAccessException { + MockAppender mockAppender = new MockAppender("testUpdateMergeMaxThreadCount"); + Logger rootLogger = LogManager.getRootLogger(); Level savedLevel = rootLogger.getLevel(); - rootLogger.addAppender(mockAppender); - rootLogger.setLevel(Level.TRACE); + TestLoggers.addAppender(rootLogger, mockAppender); + Loggers.setLevel(rootLogger, Level.TRACE); try { @@ -456,8 +456,8 @@ public class UpdateSettingsIT extends ESIntegTestCase { assertThat(getSettingsResponse.getSetting("test", MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey()), equalTo("1")); } finally { - rootLogger.removeAppender(mockAppender); - rootLogger.setLevel(savedLevel); + TestLoggers.removeAppender(rootLogger, mockAppender); + Loggers.setLevel(rootLogger, savedLevel); } } diff --git a/core/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java b/core/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java index 66687ea74fa..0515887a550 100644 --- a/core/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java +++ b/core/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.indices.state; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; @@ -28,7 +29,6 @@ import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.ShardRoutingState; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexNotFoundException; @@ -43,7 +43,7 @@ import static org.hamcrest.Matchers.nullValue; */ @ESIntegTestCase.ClusterScope(minNumDataNodes = 2) public class SimpleIndexStateIT extends ESIntegTestCase { - private final ESLogger logger = Loggers.getLogger(SimpleIndexStateIT.class); + private final Logger logger = Loggers.getLogger(SimpleIndexStateIT.class); public void testSimpleOpenClose() { logger.info("--> creating test index"); diff --git a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java index f2841fc52b7..b248fc811f6 100644 --- a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java +++ 
b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.indices.store; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.cluster.ClusterState; @@ -37,7 +38,6 @@ import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationComman import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.env.Environment; @@ -474,11 +474,11 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { * the ShardActiveRequest. */ public static class ReclocationStartEndTracer extends MockTransportService.Tracer { - private final ESLogger logger; + private final Logger logger; private final CountDownLatch beginRelocationLatch; private final CountDownLatch receivedShardExistsRequestLatch; - public ReclocationStartEndTracer(ESLogger logger, CountDownLatch beginRelocationLatch, CountDownLatch receivedShardExistsRequestLatch) { + public ReclocationStartEndTracer(Logger logger, CountDownLatch beginRelocationLatch, CountDownLatch receivedShardExistsRequestLatch) { this.logger = logger; this.beginRelocationLatch = beginRelocationLatch; this.receivedShardExistsRequestLatch = receivedShardExistsRequestLatch; diff --git a/core/src/test/java/org/elasticsearch/monitor/jvm/JvmGcMonitorServiceTests.java b/core/src/test/java/org/elasticsearch/monitor/jvm/JvmGcMonitorServiceTests.java index ab5b1ac4750..972072c0ebd 100644 --- a/core/src/test/java/org/elasticsearch/monitor/jvm/JvmGcMonitorServiceTests.java +++ b/core/src/test/java/org/elasticsearch/monitor/jvm/JvmGcMonitorServiceTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.monitor.jvm; -import org.elasticsearch.common.logging.ESLogger; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESTestCase; @@ -32,7 +32,7 @@ import static org.mockito.Mockito.when; public class JvmGcMonitorServiceTests extends ESTestCase { public void testSlowGcLogging() { - final ESLogger logger = mock(ESLogger.class); + final Logger logger = mock(Logger.class); when(logger.isWarnEnabled()).thenReturn(true); when(logger.isInfoEnabled()).thenReturn(true); when(logger.isDebugEnabled()).thenReturn(true); @@ -138,7 +138,7 @@ public class JvmGcMonitorServiceTests extends ESTestCase { final int current = randomIntBetween(1, Integer.MAX_VALUE); final long elapsed = randomIntBetween(current, Integer.MAX_VALUE); final long seq = randomIntBetween(1, Integer.MAX_VALUE); - final ESLogger logger = mock(ESLogger.class); + final Logger logger = mock(Logger.class); when(logger.isWarnEnabled()).thenReturn(true); when(logger.isInfoEnabled()).thenReturn(true); when(logger.isDebugEnabled()).thenReturn(true); diff --git a/core/src/test/java/org/elasticsearch/monitor/jvm/JvmInfoTests.java b/core/src/test/java/org/elasticsearch/monitor/jvm/JvmInfoTests.java index 82b264ae1d6..131593cd114 100644 --- a/core/src/test/java/org/elasticsearch/monitor/jvm/JvmInfoTests.java +++ b/core/src/test/java/org/elasticsearch/monitor/jvm/JvmInfoTests.java @@ -57,5 +57,4 
@@ public class JvmInfoTests extends ESTestCase { final int index = argline.lastIndexOf(flag); return argline.charAt(index - 1) == '+'; } - } diff --git a/core/src/test/java/org/elasticsearch/monitor/os/OsProbeTests.java b/core/src/test/java/org/elasticsearch/monitor/os/OsProbeTests.java index 2edaad5c4ba..2e085a80700 100644 --- a/core/src/test/java/org/elasticsearch/monitor/os/OsProbeTests.java +++ b/core/src/test/java/org/elasticsearch/monitor/os/OsProbeTests.java @@ -32,24 +32,28 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThanOrEqualTo; public class OsProbeTests extends ESTestCase { - OsProbe probe = OsProbe.getInstance(); + private final OsProbe probe = OsProbe.getInstance(); public void testOsInfo() { - OsInfo info = probe.osInfo(); + int allocatedProcessors = randomIntBetween(1, Runtime.getRuntime().availableProcessors()); + long refreshInterval = randomBoolean() ? -1 : randomPositiveLong(); + OsInfo info = probe.osInfo(refreshInterval, allocatedProcessors); assertNotNull(info); - assertThat(info.getRefreshInterval(), anyOf(equalTo(-1L), greaterThanOrEqualTo(0L))); - assertThat(info.getName(), equalTo(Constants.OS_NAME)); - assertThat(info.getArch(), equalTo(Constants.OS_ARCH)); - assertThat(info.getVersion(), equalTo(Constants.OS_VERSION)); - assertThat(info.getAvailableProcessors(), equalTo(Runtime.getRuntime().availableProcessors())); + assertEquals(refreshInterval, info.getRefreshInterval()); + assertEquals(Constants.OS_NAME, info.getName()); + assertEquals(Constants.OS_ARCH, info.getArch()); + assertEquals(Constants.OS_VERSION, info.getVersion()); + assertEquals(allocatedProcessors, info.getAllocatedProcessors()); + assertEquals(Runtime.getRuntime().availableProcessors(), info.getAvailableProcessors()); } public void testOsStats() { OsStats stats = probe.osStats(); assertNotNull(stats); assertThat(stats.getTimestamp(), greaterThan(0L)); - assertThat(stats.getCpu().getPercent(), anyOf(equalTo((short) -1), is(both(greaterThanOrEqualTo((short) 0)).and(lessThanOrEqualTo((short) 100))))); - double[] loadAverage = stats.getCpu().loadAverage; + assertThat(stats.getCpu().getPercent(), anyOf(equalTo((short) -1), + is(both(greaterThanOrEqualTo((short) 0)).and(lessThanOrEqualTo((short) 100))))); + double[] loadAverage = stats.getCpu().getLoadAverage(); if (loadAverage != null) { assertThat(loadAverage.length, equalTo(3)); } diff --git a/core/src/test/java/org/elasticsearch/monitor/os/OsStatsTests.java b/core/src/test/java/org/elasticsearch/monitor/os/OsStatsTests.java new file mode 100644 index 00000000000..30d527311b3 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/monitor/os/OsStatsTests.java @@ -0,0 +1,55 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.monitor.os; + +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +public class OsStatsTests extends ESTestCase { + + public void testSerialization() throws IOException { + int numLoadAverages = randomIntBetween(1, 5); + double loadAverages[] = new double[numLoadAverages]; + for (int i = 0; i < loadAverages.length; i++) { + loadAverages[i] = randomDouble(); + } + OsStats.Cpu cpu = new OsStats.Cpu(randomShort(), loadAverages); + OsStats.Mem mem = new OsStats.Mem(randomLong(), randomLong()); + OsStats.Swap swap = new OsStats.Swap(randomLong(), randomLong()); + OsStats osStats = new OsStats(System.currentTimeMillis(), cpu, mem, swap); + + try (BytesStreamOutput out = new BytesStreamOutput()) { + osStats.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + OsStats deserializedOsStats = new OsStats(in); + assertEquals(osStats.getTimestamp(), deserializedOsStats.getTimestamp()); + assertEquals(osStats.getCpu().getPercent(), deserializedOsStats.getCpu().getPercent()); + assertArrayEquals(osStats.getCpu().getLoadAverage(), deserializedOsStats.getCpu().getLoadAverage(), 0); + assertEquals(osStats.getMem().getFree(), deserializedOsStats.getMem().getFree()); + assertEquals(osStats.getMem().getTotal(), deserializedOsStats.getMem().getTotal()); + assertEquals(osStats.getSwap().getFree(), deserializedOsStats.getSwap().getFree()); + assertEquals(osStats.getSwap().getTotal(), deserializedOsStats.getSwap().getTotal()); + } + } + } +} diff --git a/core/src/test/java/org/elasticsearch/monitor/process/ProcessProbeTests.java b/core/src/test/java/org/elasticsearch/monitor/process/ProcessProbeTests.java index 8e6016f6f98..5423242ccd8 100644 --- a/core/src/test/java/org/elasticsearch/monitor/process/ProcessProbeTests.java +++ b/core/src/test/java/org/elasticsearch/monitor/process/ProcessProbeTests.java @@ -33,14 +33,15 @@ import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; public class ProcessProbeTests extends ESTestCase { - ProcessProbe probe = ProcessProbe.getInstance(); + private final ProcessProbe probe = ProcessProbe.getInstance(); public void testProcessInfo() { - ProcessInfo info = probe.processInfo(); + long refreshInterval = randomPositiveLong(); + ProcessInfo info = probe.processInfo(refreshInterval); assertNotNull(info); - assertThat(info.getRefreshInterval(), greaterThanOrEqualTo(0L)); - assertThat(info.getId(), equalTo(jvmInfo().pid())); - assertThat(info.isMlockall(), equalTo(BootstrapInfo.isMemoryLocked())); + assertEquals(refreshInterval, info.getRefreshInterval()); + assertEquals(jvmInfo().pid(), info.getId()); + assertEquals(BootstrapInfo.isMemoryLocked(), info.isMlockall()); } public void testProcessStats() { diff --git a/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java b/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java index 7cd4e355218..d9134ba5cf3 100644 --- a/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java +++ b/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.nodesinfo; import org.elasticsearch.Build; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.PluginsAndModules; import 
org.elasticsearch.cluster.node.DiscoveryNode; @@ -35,11 +34,11 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.http.HttpInfo; import org.elasticsearch.ingest.IngestInfo; +import org.elasticsearch.ingest.ProcessorInfo; import org.elasticsearch.monitor.jvm.JvmInfo; -import org.elasticsearch.monitor.os.DummyOsInfo; import org.elasticsearch.monitor.os.OsInfo; import org.elasticsearch.monitor.process.ProcessInfo; -import org.elasticsearch.plugins.DummyPluginInfo; +import org.elasticsearch.plugins.PluginInfo; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.threadpool.ThreadPool; @@ -48,7 +47,6 @@ import org.elasticsearch.transport.TransportInfo; import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -58,25 +56,20 @@ import static java.util.Collections.emptySet; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.core.IsEqual.equalTo; -/** - * - */ public class NodeInfoStreamingTests extends ESTestCase { public void testNodeInfoStreaming() throws IOException { NodeInfo nodeInfo = createNodeInfo(); - Version version = Version.CURRENT; - BytesStreamOutput out = new BytesStreamOutput(); - out.setVersion(version); - nodeInfo.writeTo(out); - out.close(); - StreamInput in = out.bytes().streamInput(); - in.setVersion(version); - NodeInfo readNodeInfo = NodeInfo.readNodeInfo(in); - assertExpectedUnchanged(nodeInfo, readNodeInfo); - + try (BytesStreamOutput out = new BytesStreamOutput()) { + nodeInfo.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + NodeInfo readNodeInfo = NodeInfo.readNodeInfo(in); + assertExpectedUnchanged(nodeInfo, readNodeInfo); + } + } } - // checks all properties that are expected to be unchanged. Once we start changing them between versions this method has to be changed as well + // checks all properties that are expected to be unchanged. 
+ // Once we start changing them between versions this method has to be changed as well private void assertExpectedUnchanged(NodeInfo nodeInfo, NodeInfo readNodeInfo) throws IOException { assertThat(nodeInfo.getBuild().toString(), equalTo(readNodeInfo.getBuild().toString())); assertThat(nodeInfo.getHostname(), equalTo(readNodeInfo.getHostname())); @@ -89,24 +82,15 @@ public class NodeInfoStreamingTests extends ESTestCase { compareJsonOutput(nodeInfo.getTransport(), readNodeInfo.getTransport()); compareJsonOutput(nodeInfo.getNode(), readNodeInfo.getNode()); compareJsonOutput(nodeInfo.getOs(), readNodeInfo.getOs()); - comparePluginsAndModules(nodeInfo, readNodeInfo); + compareJsonOutput(nodeInfo.getPlugins(), readNodeInfo.getPlugins()); compareJsonOutput(nodeInfo.getIngest(), readNodeInfo.getIngest()); } - private void comparePluginsAndModules(NodeInfo nodeInfo, NodeInfo readNodeInfo) throws IOException { - ToXContent.Params params = ToXContent.EMPTY_PARAMS; - XContentBuilder pluginsAndModules = jsonBuilder(); - pluginsAndModules.startObject(); - nodeInfo.getPlugins().toXContent(pluginsAndModules, params); - pluginsAndModules.endObject(); - XContentBuilder readPluginsAndModules = jsonBuilder(); - readPluginsAndModules.startObject(); - readNodeInfo.getPlugins().toXContent(readPluginsAndModules, params); - readPluginsAndModules.endObject(); - assertThat(pluginsAndModules.string(), equalTo(readPluginsAndModules.string())); - } - private void compareJsonOutput(ToXContent param1, ToXContent param2) throws IOException { + if (param1 == null) { + assertNull(param2); + return; + } ToXContent.Params params = ToXContent.EMPTY_PARAMS; XContentBuilder param1Builder = jsonBuilder(); param1Builder.startObject(); @@ -120,36 +104,73 @@ public class NodeInfoStreamingTests extends ESTestCase { assertThat(param1Builder.string(), equalTo(param2Builder.string())); } - private NodeInfo createNodeInfo() { + private static NodeInfo createNodeInfo() { Build build = Build.CURRENT; DiscoveryNode node = new DiscoveryNode("test_node", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), VersionUtils.randomVersion(random())); - Map serviceAttributes = new HashMap<>(); - serviceAttributes.put("test", "attribute"); - Settings settings = Settings.builder().put("test", "setting").build(); - OsInfo osInfo = DummyOsInfo.INSTANCE; - ProcessInfo process = new ProcessInfo(randomInt(), randomBoolean()); - JvmInfo jvm = JvmInfo.jvmInfo(); - List threadPoolInfos = new ArrayList<>(); - threadPoolInfos.add(new ThreadPool.Info("test_threadpool", ThreadPool.ThreadPoolType.FIXED, 5)); - ThreadPoolInfo threadPoolInfo = new ThreadPoolInfo(threadPoolInfos); + Settings settings = randomBoolean() ? null : Settings.builder().put("test", "setting").build(); + OsInfo osInfo = null; + if (randomBoolean()) { + int availableProcessors = randomIntBetween(1, 64); + int allocatedProcessors = randomIntBetween(1, availableProcessors); + long refreshInterval = randomBoolean() ? -1 : randomPositiveLong(); + String name = randomAsciiOfLengthBetween(3, 10); + String arch = randomAsciiOfLengthBetween(3, 10); + String version = randomAsciiOfLengthBetween(3, 10); + osInfo = new OsInfo(refreshInterval, availableProcessors, allocatedProcessors, name, arch, version); + } + ProcessInfo process = randomBoolean() ? null : new ProcessInfo(randomInt(), randomBoolean(), randomPositiveLong()); + JvmInfo jvm = randomBoolean() ? 
null : JvmInfo.jvmInfo(); + ThreadPoolInfo threadPoolInfo = null; + if (randomBoolean()) { + int numThreadPools = randomIntBetween(1, 10); + List threadPoolInfos = new ArrayList<>(numThreadPools); + for (int i = 0; i < numThreadPools; i++) { + threadPoolInfos.add(new ThreadPool.Info(randomAsciiOfLengthBetween(3, 10), + randomFrom(ThreadPool.ThreadPoolType.values()), randomInt())); + } + threadPoolInfo = new ThreadPoolInfo(threadPoolInfos); + } Map profileAddresses = new HashMap<>(); - BoundTransportAddress dummyBoundTransportAddress = new BoundTransportAddress(new TransportAddress[]{LocalTransportAddress.buildUnique()}, LocalTransportAddress.buildUnique()); + BoundTransportAddress dummyBoundTransportAddress = new BoundTransportAddress( + new TransportAddress[]{LocalTransportAddress.buildUnique()}, LocalTransportAddress.buildUnique()); profileAddresses.put("test_address", dummyBoundTransportAddress); - TransportInfo transport = new TransportInfo(dummyBoundTransportAddress, profileAddresses); - HttpInfo htttpInfo = new HttpInfo(dummyBoundTransportAddress, randomLong()); - PluginsAndModules plugins = new PluginsAndModules(); - plugins.addModule(DummyPluginInfo.INSTANCE); - plugins.addPlugin(DummyPluginInfo.INSTANCE); - IngestInfo ingestInfo = new IngestInfo(Collections.emptyList()); - ByteSizeValue indexingBuffer; - if (random().nextBoolean()) { - indexingBuffer = null; - } else { + TransportInfo transport = randomBoolean() ? null : new TransportInfo(dummyBoundTransportAddress, profileAddresses); + HttpInfo httpInfo = randomBoolean() ? null : new HttpInfo(dummyBoundTransportAddress, randomLong()); + + PluginsAndModules pluginsAndModules = null; + if (randomBoolean()) { + int numPlugins = randomIntBetween(0, 5); + List plugins = new ArrayList<>(); + for (int i = 0; i < numPlugins; i++) { + plugins.add(new PluginInfo(randomAsciiOfLengthBetween(3, 10), randomAsciiOfLengthBetween(3, 10), + randomAsciiOfLengthBetween(3, 10), randomAsciiOfLengthBetween(3, 10))); + } + int numModules = randomIntBetween(0, 5); + List modules = new ArrayList<>(); + for (int i = 0; i < numModules; i++) { + modules.add(new PluginInfo(randomAsciiOfLengthBetween(3, 10), randomAsciiOfLengthBetween(3, 10), + randomAsciiOfLengthBetween(3, 10), randomAsciiOfLengthBetween(3, 10))); + } + pluginsAndModules = new PluginsAndModules(plugins, modules); + } + + IngestInfo ingestInfo = null; + if (randomBoolean()) { + int numProcessors = randomIntBetween(0, 5); + List processors = new ArrayList<>(numProcessors); + for (int i = 0; i < numProcessors; i++) { + processors.add(new ProcessorInfo(randomAsciiOfLengthBetween(3, 10))); + } + ingestInfo = new IngestInfo(processors); + } + + ByteSizeValue indexingBuffer = null; + if (randomBoolean()) { // pick a random long that sometimes exceeds an int: indexingBuffer = new ByteSizeValue(random().nextLong() & ((1L<<40)-1)); } return new NodeInfo(VersionUtils.randomVersion(random()), build, node, settings, osInfo, process, jvm, - threadPoolInfo, transport, htttpInfo, plugins, ingestInfo, indexingBuffer); + threadPoolInfo, transport, httpInfo, pluginsAndModules, ingestInfo, indexingBuffer); } } diff --git a/core/src/test/java/org/elasticsearch/plugins/PluginInfoTests.java b/core/src/test/java/org/elasticsearch/plugins/PluginInfoTests.java index 73b31b92637..4ad52be8866 100644 --- a/core/src/test/java/org/elasticsearch/plugins/PluginInfoTests.java +++ b/core/src/test/java/org/elasticsearch/plugins/PluginInfoTests.java @@ -23,8 +23,9 @@ import org.elasticsearch.Version; import 
org.elasticsearch.action.admin.cluster.node.info.PluginsAndModules; import org.elasticsearch.test.ESTestCase; -import java.nio.file.Files; import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.stream.Collectors; @@ -201,15 +202,17 @@ public class PluginInfoTests extends ESTestCase { } public void testPluginListSorted() { - PluginsAndModules pluginsInfo = new PluginsAndModules(); - pluginsInfo.addPlugin(new PluginInfo("c", "foo", "dummy", "dummyclass")); - pluginsInfo.addPlugin(new PluginInfo("b", "foo", "dummy", "dummyclass")); - pluginsInfo.addPlugin(new PluginInfo("e", "foo", "dummy", "dummyclass")); - pluginsInfo.addPlugin(new PluginInfo("a", "foo", "dummy", "dummyclass")); - pluginsInfo.addPlugin(new PluginInfo("d", "foo", "dummy", "dummyclass")); + List plugins = new ArrayList<>(); + plugins.add(new PluginInfo("c", "foo", "dummy", "dummyclass")); + plugins.add(new PluginInfo("b", "foo", "dummy", "dummyclass")); + plugins.add(new PluginInfo("e", "foo", "dummy", "dummyclass")); + plugins.add(new PluginInfo("a", "foo", "dummy", "dummyclass")); + plugins.add(new PluginInfo("d", "foo", "dummy", "dummyclass")); + PluginsAndModules pluginsInfo = new PluginsAndModules(plugins, Collections.emptyList()); + final List infos = pluginsInfo.getPluginInfos(); - List names = infos.stream().map((input) -> input.getName()).collect(Collectors.toList()); + List names = infos.stream().map(PluginInfo::getName).collect(Collectors.toList()); assertThat(names, contains("a", "b", "c", "d", "e")); } } diff --git a/core/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java b/core/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java index 87e5b0118f5..71c96b85fd5 100644 --- a/core/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java +++ b/core/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java @@ -19,16 +19,16 @@ package org.elasticsearch.recovery; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -42,6 +42,7 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.junit.annotations.TestLogging; import java.util.Arrays; +import java.util.Set; import java.util.concurrent.TimeUnit; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; @@ -54,7 +55,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTi @TestLogging("_root:DEBUG,index.shard:TRACE") public class RecoveryWhileUnderLoadIT extends ESIntegTestCase { - private final ESLogger logger = Loggers.getLogger(RecoveryWhileUnderLoadIT.class); + private final Logger logger = Loggers.getLogger(RecoveryWhileUnderLoadIT.class); public void 
diff --git a/core/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java b/core/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java index 87e5b0118f5..71c96b85fd5 100644 --- a/core/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java +++ b/core/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java @@ -19,16 +19,16 @@ package org.elasticsearch.recovery; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -42,6 +42,7 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.junit.annotations.TestLogging; import java.util.Arrays; +import java.util.Set; import java.util.concurrent.TimeUnit; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; @@ -54,7 +55,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTi @TestLogging("_root:DEBUG,index.shard:TRACE") public class RecoveryWhileUnderLoadIT extends ESIntegTestCase { - private final ESLogger logger = Loggers.getLogger(RecoveryWhileUnderLoadIT.class); + private final Logger logger = Loggers.getLogger(RecoveryWhileUnderLoadIT.class); public void testRecoverWhileUnderLoadAllocateReplicasTest() throws Exception { logger.info("--> creating test index ..."); @@ -105,7 +106,7 @@ public class RecoveryWhileUnderLoadIT extends ESIntegTestCase { logger.info("--> refreshing the index"); refreshAndAssert(); logger.info("--> verifying indexed content"); - iterateAssertCount(numberOfShards, indexer.totalIndexedDocs(), 10); + iterateAssertCount(numberOfShards, 10, indexer.getIds()); } } @@ -156,7 +157,7 @@ public class RecoveryWhileUnderLoadIT extends ESIntegTestCase { logger.info("--> refreshing the index"); refreshAndAssert(); logger.info("--> verifying indexed content"); - iterateAssertCount(numberOfShards, indexer.totalIndexedDocs(), 10); + iterateAssertCount(numberOfShards, 10, indexer.getIds()); } } @@ -225,7 +226,7 @@ public class RecoveryWhileUnderLoadIT extends ESIntegTestCase { logger.info("--> refreshing the index"); refreshAndAssert(); logger.info("--> verifying indexed content"); - iterateAssertCount(numberOfShards, indexer.totalIndexedDocs(), 10); + iterateAssertCount(numberOfShards, 10, indexer.getIds()); } } @@ -263,11 +264,12 @@ public class RecoveryWhileUnderLoadIT extends ESIntegTestCase { logger.info("--> refreshing the index"); refreshAndAssert(); logger.info("--> verifying indexed content"); - iterateAssertCount(numShards, indexer.totalIndexedDocs(), 10); + iterateAssertCount(numShards, 10, indexer.getIds()); } } - private void iterateAssertCount(final int numberOfShards, final long numberOfDocs, final int iterations) throws Exception { + private void iterateAssertCount(final int numberOfShards, final int iterations, final Set<String> ids) throws Exception { + final long numberOfDocs = ids.size(); SearchResponse[] iterationResults = new SearchResponse[iterations]; boolean error = false; for (int i = 0; i < iterations; i++) { @@ -290,12 +292,11 @@ ClusterService clusterService = clusterService(); final ClusterState state = clusterService.state(); for (int shard = 0; shard < numberOfShards; shard++) { - // background indexer starts using ids on 1 - for (int id = 1; id <= numberOfDocs; id++) { - ShardId docShard = clusterService.operationRouting().shardId(state, "test", Long.toString(id), null); + for (String id : ids) { + ShardId docShard = clusterService.operationRouting().shardId(state, "test", id, null); if (docShard.id() == shard) { for (ShardRouting shardRouting : state.routingTable().shardRoutingTable("test", shard)) { - GetResponse response = client().prepareGet("test", "type", Long.toString(id)) + GetResponse response = client().prepareGet("test", "type", id) .setPreference("_only_nodes:" + shardRouting.currentNodeId()).get(); if (response.isExists()) { logger.info("missing id [{}] on shard {}", id, shardRouting); @@ -321,6 +322,7 @@ public class RecoveryWhileUnderLoadIT extends ESIntegTestCase { TimeUnit.MINUTES ) ); + assertEquals(numberOfDocs, ids.size()); } //lets now make the test fail if it was supposed to fail
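The iterateAssertCount rewrite stops assuming the background indexer used the contiguous IDs 1..numberOfDocs and walks the indexer's actual ID set instead. A toy illustration of why the range-based check breaks once the ID space has gaps (the IDs here are hypothetical):

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class IdSetSketch {
    public static void main(String[] args) {
        // Hypothetical indexed IDs with a gap: "3" was never written, "7" was.
        Set<String> ids = new HashSet<>(Arrays.asList("1", "2", "7"));
        // Old style: reconstruct the range 1..size(); checks "3", never sees "7".
        for (long id = 1; id <= ids.size(); id++) {
            System.out.println("range check " + id + ": " + ids.contains(Long.toString(id)));
        }
        // New style: verify exactly what was indexed.
        for (String id : ids) {
            System.out.println("set check " + id + ": " + ids.contains(id));
        }
    }
}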
diff --git a/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java b/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java index c8887d5108e..8493a08d704 100644 --- a/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java +++ b/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java @@ -232,13 +232,8 @@ public class RelocationIT extends ESIntegTestCase { logger.error("Extra id [{}]", id); } } - set.forEach(new IntProcedure() { - - @Override - public void apply(int value) { - logger.error("Missing id [{}]", value); - } - + set.forEach((IntProcedure) value -> { + logger.error("Missing id [{}]", value); }); } assertThat(hits.totalHits(), equalTo(indexer.totalIndexedDocs())); @@ -363,7 +358,7 @@ public class RelocationIT extends ESIntegTestCase { List<IndexRequestBuilder> requests = new ArrayList<>(); int numDocs = scaledRandomIntBetween(25, 250); for (int i = 0; i < numDocs; i++) { - requests.add(client().prepareIndex(indexName, "type").setCreate(true).setSource("{}")); + requests.add(client().prepareIndex(indexName, "type").setSource("{}")); } indexRandom(true, requests); assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("3").setWaitForGreenStatus().get().isTimedOut()); diff --git a/core/src/test/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeActionTests.java b/core/src/test/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeActionTests.java index de188ba9e91..9b7d4073d0d 100644 --- a/core/src/test/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeActionTests.java +++ b/core/src/test/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeActionTests.java @@ -27,7 +27,6 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.test.ESTestCase; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.startsWith; @@ -90,14 +89,10 @@ public class RestAnalyzeActionTests extends ESTestCase { public void testParseXContentForAnalyzeRequestWithInvalidJsonThrowsException() throws Exception { AnalyzeRequest analyzeRequest = new AnalyzeRequest("for test"); - - try { - RestAnalyzeAction.buildFromContent(new BytesArray("{invalid_json}"), analyzeRequest, new ParseFieldMatcher(Settings.EMPTY)); - fail("shouldn't get here"); - } catch (Exception e) { - assertThat(e, 
instanceOf(IllegalArgumentException.class)); - assertThat(e.getMessage(), startsWith("explain must be either 'true' or 'false'")); - } + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> RestAnalyzeAction.buildFromContent(invalidExplain, analyzeRequest, new ParseFieldMatcher(Settings.EMPTY))); + assertThat(e.getMessage(), startsWith("explain must be either 'true' or 'false'")); } public void testDeprecatedParamException() throws Exception { - BytesReference content = XContentFactory.jsonBuilder() - .startObject() - .field("text", "THIS IS A TEST") - .field("tokenizer", "keyword") - .array("filters", "lowercase") - .endObject().bytes(); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> RestAnalyzeAction.buildFromContent( + XContentFactory.jsonBuilder() + .startObject() + .field("text", "THIS IS A TEST") + .field("tokenizer", "keyword") + .array("filters", "lowercase") + .endObject().bytes(), + new AnalyzeRequest("for test"), new ParseFieldMatcher(Settings.EMPTY))); + assertThat(e.getMessage(), startsWith("Unknown parameter [filters]")); - AnalyzeRequest analyzeRequest = new AnalyzeRequest("for test"); - try { - RestAnalyzeAction.buildFromContent(content, analyzeRequest, new ParseFieldMatcher(Settings.EMPTY)); - } catch (Exception e) { - assertThat(e, instanceOf(IllegalArgumentException.class)); - assertThat(e.getMessage(), startsWith("Unknown parameter [filters]")); - } + e = expectThrows(IllegalArgumentException.class, + () -> RestAnalyzeAction.buildFromContent( + XContentFactory.jsonBuilder() + .startObject() + .field("text", "THIS IS A TEST") + .field("tokenizer", "keyword") + .array("token_filters", "lowercase") + .endObject().bytes(), + new AnalyzeRequest("for test"), new ParseFieldMatcher(Settings.EMPTY))); + assertThat(e.getMessage(), startsWith("Unknown parameter [token_filters]")); - content = XContentFactory.jsonBuilder() - .startObject() - .field("text", "THIS IS A TEST") - .field("tokenizer", "keyword") - .array("token_filters", "lowercase") - .endObject().bytes(); - analyzeRequest = new AnalyzeRequest("for test"); - - try { - RestAnalyzeAction.buildFromContent(content, analyzeRequest, new ParseFieldMatcher(Settings.EMPTY)); - } catch (Exception e) { - assertThat(e, instanceOf(IllegalArgumentException.class)); - assertThat(e.getMessage(), startsWith("Unknown parameter [token_filters]")); - } - - content = XContentFactory.jsonBuilder() - .startObject() - .field("text", "THIS IS A TEST") - .field("tokenizer", "keyword") - .array("char_filters", "lowercase") - .endObject().bytes(); - - analyzeRequest = new AnalyzeRequest("for test"); - - try { - RestAnalyzeAction.buildFromContent(content, analyzeRequest, new ParseFieldMatcher(Settings.EMPTY)); - } catch (Exception e) { - assertThat(e, instanceOf(IllegalArgumentException.class)); - assertThat(e.getMessage(), startsWith("Unknown parameter [char_filters]")); - } + e = expectThrows(IllegalArgumentException.class, + () -> RestAnalyzeAction.buildFromContent( + XContentFactory.jsonBuilder() + .startObject() + .field("text", "THIS IS A TEST") + .field("tokenizer", "keyword") + .array("char_filters", "lowercase") + .endObject().bytes(), + new AnalyzeRequest("for test"), new ParseFieldMatcher(Settings.EMPTY))); + assertThat(e.getMessage(), startsWith("Unknown parameter [char_filters]")); + e = expectThrows(IllegalArgumentException.class, + () -> RestAnalyzeAction.buildFromContent( + XContentFactory.jsonBuilder() + .startObject() + .field("text", "THIS IS A TEST") + 
.field("tokenizer", "keyword") + .array("token_filter", "lowercase") + .endObject().bytes() + , new AnalyzeRequest("for test"), new ParseFieldMatcher(Settings.EMPTY))); + assertThat(e.getMessage(), startsWith("Unknown parameter [token_filter]")); } } diff --git a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java index 3e07d3c170d..7b345b137b9 100644 --- a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java +++ b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java @@ -45,6 +45,7 @@ import java.nio.file.Path; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.Map; import static org.hamcrest.CoreMatchers.containsString; @@ -86,7 +87,9 @@ public class ScriptServiceTests extends ESTestCase { resourceWatcherService = new ResourceWatcherService(baseSettings, null); scriptEngineService = new TestEngineService(); dangerousScriptEngineService = new TestDangerousEngineService(); - scriptEnginesByLangMap = ScriptModesTests.buildScriptEnginesByLangMap(Collections.singleton(scriptEngineService)); + TestEngineService defaultScriptServiceEngine = new TestEngineService(Script.DEFAULT_SCRIPT_LANG) {}; + scriptEnginesByLangMap = ScriptModesTests.buildScriptEnginesByLangMap( + new HashSet<>(Arrays.asList(scriptEngineService, defaultScriptServiceEngine))); //randomly register custom script contexts int randomInt = randomIntBetween(0, 3); //prevent duplicates using map @@ -103,7 +106,8 @@ public class ScriptServiceTests extends ESTestCase { String context = plugin + "_" + operation; contexts.put(context, new ScriptContext.Plugin(plugin, operation)); } - scriptEngineRegistry = new ScriptEngineRegistry(Arrays.asList(scriptEngineService, dangerousScriptEngineService)); + scriptEngineRegistry = new ScriptEngineRegistry(Arrays.asList(scriptEngineService, dangerousScriptEngineService, + defaultScriptServiceEngine)); scriptContextRegistry = new ScriptContextRegistry(contexts.values()); scriptSettings = new ScriptSettings(scriptEngineRegistry, scriptContextRegistry); scriptContexts = scriptContextRegistry.scriptContexts().toArray(new ScriptContext[scriptContextRegistry.scriptContexts().size()]); @@ -406,12 +410,11 @@ public class ScriptServiceTests extends ESTestCase { public void testDefaultLanguage() throws IOException { Settings.Builder builder = Settings.builder(); - builder.put("script.default_lang", "test"); builder.put("script.inline", "true"); buildScriptService(builder.build()); CompiledScript script = scriptService.compile(new Script("1 + 1", ScriptType.INLINE, null, null), randomFrom(scriptContexts), Collections.emptyMap()); - assertEquals(script.lang(), "test"); + assertEquals(script.lang(), Script.DEFAULT_SCRIPT_LANG); } public void testStoreScript() throws Exception { @@ -509,14 +512,24 @@ public class ScriptServiceTests extends ESTestCase { public static final String NAME = "test"; + private final String name; + + public TestEngineService() { + this(NAME); + } + + public TestEngineService(String name) { + this.name = name; + } + @Override public String getType() { - return NAME; + return name; } @Override public String getExtension() { - return NAME; + return name; } @Override diff --git a/core/src/test/java/org/elasticsearch/script/ScriptSettingsTests.java b/core/src/test/java/org/elasticsearch/script/ScriptSettingsTests.java index 120fd38b9a0..917650d36b8 100644 --- 
a/core/src/test/java/org/elasticsearch/script/ScriptSettingsTests.java +++ b/core/src/test/java/org/elasticsearch/script/ScriptSettingsTests.java @@ -34,32 +34,33 @@ import static org.hamcrest.Matchers.equalTo; public class ScriptSettingsTests extends ESTestCase { - public void testDefaultLanguageIsPainless() { + public void testDefaultLegacyLanguageIsPainless() { ScriptEngineRegistry scriptEngineRegistry = new ScriptEngineRegistry(Collections.singletonList(new CustomScriptEngineService())); ScriptContextRegistry scriptContextRegistry = new ScriptContextRegistry(Collections.emptyList()); ScriptSettings scriptSettings = new ScriptSettings(scriptEngineRegistry, scriptContextRegistry); - assertThat(scriptSettings.getDefaultScriptLanguageSetting().get(Settings.EMPTY), equalTo("painless")); + assertThat(scriptSettings.getDefaultLegacyScriptLanguageSetting().get(Settings.EMPTY), + equalTo(ScriptSettings.LEGACY_DEFAULT_LANG)); } - public void testCustomDefaultLanguage() { + public void testCustomLegacyDefaultLanguage() { ScriptEngineRegistry scriptEngineRegistry = new ScriptEngineRegistry(Collections.singletonList(new CustomScriptEngineService())); ScriptContextRegistry scriptContextRegistry = new ScriptContextRegistry(Collections.emptyList()); ScriptSettings scriptSettings = new ScriptSettings(scriptEngineRegistry, scriptContextRegistry); String defaultLanguage = CustomScriptEngineService.NAME; - Settings settings = Settings.builder().put("script.default_lang", defaultLanguage).build(); - assertThat(scriptSettings.getDefaultScriptLanguageSetting().get(settings), equalTo(defaultLanguage)); + Settings settings = Settings.builder().put(ScriptSettings.LEGACY_SCRIPT_SETTING, defaultLanguage).build(); + assertThat(scriptSettings.getDefaultLegacyScriptLanguageSetting().get(settings), equalTo(defaultLanguage)); } - public void testInvalidDefaultLanguage() { + public void testInvalidLegacyDefaultLanguage() { ScriptEngineRegistry scriptEngineRegistry = new ScriptEngineRegistry(Collections.singletonList(new CustomScriptEngineService())); ScriptContextRegistry scriptContextRegistry = new ScriptContextRegistry(Collections.emptyList()); ScriptSettings scriptSettings = new ScriptSettings(scriptEngineRegistry, scriptContextRegistry); - Settings settings = Settings.builder().put("script.default_lang", "C++").build(); + Settings settings = Settings.builder().put(ScriptSettings.LEGACY_SCRIPT_SETTING, "C++").build(); try { - scriptSettings.getDefaultScriptLanguageSetting().get(settings); + scriptSettings.getDefaultLegacyScriptLanguageSetting().get(settings); fail("should have seen unregistered default language"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("unregistered default language [C++]")); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTestCase.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTestCase.java index fbd69755f24..b30dca1c9a0 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTestCase.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTestCase.java @@ -103,7 +103,7 @@ public abstract class ShardSizeTestCase extends ESIntegTestCase { protected List<IndexRequestBuilder> indexDoc(String shard, String key, int times) throws Exception { IndexRequestBuilder[] builders = new IndexRequestBuilder[times]; for (int i = 0; i < times; i++) { - builders[i] = client().prepareIndex("idx", "type").setRouting(shard).setCreate(true).setSource(jsonBuilder() + 
builders[i] = client().prepareIndex("idx", "type").setRouting(shard).setSource(jsonBuilder() .startObject() .field("key", key) .field("value", 1) diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java index 2d9d5ca043d..fab1f8b7d3e 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java @@ -20,12 +20,10 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.plugins.Plugin; @@ -49,6 +47,7 @@ import org.elasticsearch.search.aggregations.bucket.significant.heuristics.Signi import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristicParser; import org.elasticsearch.search.aggregations.bucket.terms.StringTerms; import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.search.aggregations.support.XContentParseContext; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.search.aggregations.bucket.SharedSignificantTermsTestMethods; @@ -172,7 +171,7 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase { @Override public List<SearchExtensionSpec<SignificanceHeuristic, SignificanceHeuristicParser>> getSignificanceHeuristics() { return singletonList(new SearchExtensionSpec<>(SimpleHeuristic.NAME, - SimpleHeuristic::new, SimpleHeuristic::parse)); + SimpleHeuristic::new, (context) -> SimpleHeuristic.parse(context))); } @Override @@ -239,9 +238,9 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase { return subsetFreq / subsetSize > supersetFreq / supersetSize ? 2.0 : 1.0; } - public static SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher) + public static SignificanceHeuristic parse(XContentParseContext context) throws IOException, QueryShardException { - parser.nextToken(); + context.getParser().nextToken(); return new SimpleHeuristic(); } }
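The registration above swaps the SimpleHeuristic::parse method reference for an explicit (context) -> SimpleHeuristic.parse(context) lambda to match the parser's new single-argument shape. For a one-argument functional interface the two forms behave identically, which this self-contained snippet demonstrates:

import java.util.function.Function;

public class LambdaVsMethodRefSketch {
    static String parse(String input) {
        return "parsed:" + input;
    }

    public static void main(String[] args) {
        // A method reference and the equivalent explicit lambda.
        Function<String, String> byReference = LambdaVsMethodRefSketch::parse;
        Function<String, String> byLambda = input -> parse(input);
        System.out.println(byReference.apply("x")); // parsed:x
        System.out.println(byLambda.apply("x"));    // parsed:x
    }
}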
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java index 4e46a0b6a66..510df4c572b 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.search.aggregations.metrics; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.plugins.Plugin; @@ -477,7 +479,7 @@ public class StatsIT extends AbstractNumericTestCase { ShardSearchFailure[] failures = response.getShardFailures(); if (failures.length != expectedFailures) { for (ShardSearchFailure failure : failures) { - logger.error("Shard Failure: {}", failure.getCause(), failure); + logger.error((Supplier<?>) () -> new ParameterizedMessage("Shard Failure: {}", failure), failure.getCause()); } fail("Unexpected shard failures!"); }
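The StatsIT change is the log4j 2 lazy-logging idiom this patch applies throughout: the message is wrapped in a Supplier so the ParameterizedMessage is only built when the level is enabled, and the Throwable moves to the final argument so its stack trace is kept. A minimal sketch against the log4j 2 API (the logger and message values are illustrative):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

public class LazyLoggingSketch {
    private static final Logger logger = LogManager.getLogger(LazyLoggingSketch.class);

    public static void main(String[] args) {
        Exception cause = new RuntimeException("boom");
        String source = "some-request-source";
        // The Supplier defers building the message until the level check passes.
        logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to execute [{}]", source), cause);
    }
}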
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java index bf8db50ba80..45d44c863a4 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java @@ -219,7 +219,7 @@ public class TopHitsIT extends ESIntegTestCase { builder.endArray().endObject(); builders.add( - client().prepareIndex("articles", "article").setCreate(true).setSource(builder) + client().prepareIndex("articles", "article").setSource(builder) ); } diff --git a/core/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java b/core/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java index df9af970f9b..eeb14baf37a 100644 --- a/core/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java +++ b/core/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java @@ -26,7 +26,6 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.termvectors.TermVectorsRequest; import org.elasticsearch.action.termvectors.TermVectorsResponse; -import org.elasticsearch.common.Priority; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; diff --git a/core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java b/core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java index 14f3e8e0bbc..e6d7700c845 100644 --- a/core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java +++ b/core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java @@ -19,12 +19,8 @@ package org.elasticsearch.search.geo; -import org.elasticsearch.common.logging.ESLoggerFactory; -import org.locationtech.spatial4j.context.SpatialContext; -import org.locationtech.spatial4j.distance.DistanceUtils; -import org.locationtech.spatial4j.exception.InvalidShapeException; -import org.locationtech.spatial4j.shape.Shape; - +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy; import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree; import org.apache.lucene.spatial.query.SpatialArgs; @@ -48,6 +44,7 @@ import org.elasticsearch.common.geo.builders.MultiPolygonBuilder; import org.elasticsearch.common.geo.builders.PolygonBuilder; import org.elasticsearch.common.geo.builders.ShapeBuilders; import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -59,6 +56,10 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.VersionUtils; import org.junit.BeforeClass; +import org.locationtech.spatial4j.context.SpatialContext; +import org.locationtech.spatial4j.distance.DistanceUtils; +import org.locationtech.spatial4j.exception.InvalidShapeException; +import org.locationtech.spatial4j.shape.Shape; import java.io.ByteArrayOutputStream; import java.io.FileNotFoundException; @@ -562,7 +563,10 @@ public class GeoFilterIT extends ESIntegTestCase { strategy.makeQuery(args); return true; } catch (UnsupportedSpatialOperation e) { - ESLoggerFactory.getLogger(GeoFilterIT.class.getName()).info("Unsupported spatial operation {}", e, relation); + final SpatialOperation finalRelation = relation; + ESLoggerFactory + .getLogger(GeoFilterIT.class.getName()) + .info((Supplier<?>) () -> new ParameterizedMessage("Unsupported spatial operation {}", finalRelation), e); return false; } } diff --git a/core/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java b/core/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java index 6a4cc61e041..e96b02c0e2c 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java @@ -182,7 +182,7 @@ public class ScriptSortBuilderTests extends AbstractSortTestCase) () -> new ParameterizedMessage("failed to execute [{}]", source), e); } }); diff --git a/core/src/test/java/org/elasticsearch/test/MockLogAppender.java b/core/src/test/java/org/elasticsearch/test/MockLogAppender.java index 8f10ccd6537..786319e2aee 100644 --- a/core/src/test/java/org/elasticsearch/test/MockLogAppender.java +++ b/core/src/test/java/org/elasticsearch/test/MockLogAppender.java @@ -18,9 +18,10 @@ */ package org.elasticsearch.test; -import org.apache.log4j.AppenderSkeleton; -import org.apache.log4j.Level; -import org.apache.log4j.spi.LoggingEvent; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.core.LogEvent; +import org.apache.logging.log4j.core.appender.AbstractAppender; +import org.apache.logging.log4j.core.filter.RegexFilter; import org.elasticsearch.common.regex.Regex; import java.util.ArrayList; @@ -32,13 +33,14 @@ import static org.hamcrest.MatcherAssert.assertThat; /** * Test appender that can be used to verify that certain events were logged correctly */ -public class MockLogAppender extends AppenderSkeleton { +public class MockLogAppender extends AbstractAppender { private static final String COMMON_PREFIX = System.getProperty("es.logger.prefix", "org.elasticsearch."); private List<LoggingExpectation> expectations; - public MockLogAppender() { + public MockLogAppender() throws IllegalAccessException { + super("mock", RegexFilter.createFilter(".*(\n.*)*", new String[0], true, null, null), null); expectations = new ArrayList<>(); } @@ -47,22 +49,12 @@ } @Override - protected void append(LoggingEvent loggingEvent) { + public void append(LogEvent event) { for (LoggingExpectation expectation : expectations) { - expectation.match(loggingEvent); + expectation.match(event); } } - @Override - public void close() { - - } - - @Override - public boolean requiresLayout() { - return false; - } - public void assertAllExpectationsMatched() { for (LoggingExpectation expectation : expectations) { expectation.assertMatched(); @@ -70,7 +62,7 @@ } public interface LoggingExpectation { - void match(LoggingEvent loggingEvent); + void match(LogEvent event); void assertMatched(); } @@ -91,10 +83,10 @@ } @Override - public void match(LoggingEvent event) { - if (event.getLevel() == level && event.getLoggerName().equals(logger)) { + public void match(LogEvent event) { + if (event.getLevel().equals(level) && event.getLoggerName().equals(logger)) { if (Regex.isSimpleMatchPattern(message)) { - if (Regex.simpleMatch(message, event.getMessage().toString())) { + if (Regex.simpleMatch(message, event.getMessage().getFormattedMessage())) { saw = true; } } else {
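MockLogAppender now extends log4j 2's AbstractAppender and matches on LogEvent.getMessage().getFormattedMessage() rather than Message.toString(), since getFormattedMessage() is what renders {} placeholders. A stripped-down capturing appender on the same base class, assuming the protected (name, filter, layout) constructor where both filter and layout may be null:

import java.util.ArrayList;
import java.util.List;
import org.apache.logging.log4j.core.LogEvent;
import org.apache.logging.log4j.core.appender.AbstractAppender;

public class CapturingAppenderSketch extends AbstractAppender {
    private final List<String> messages = new ArrayList<>();

    public CapturingAppenderSketch() {
        // name, filter, layout; a bare test appender needs neither filter nor layout.
        super("capturing", null, null);
    }

    @Override
    public void append(LogEvent event) {
        // getFormattedMessage() renders {} placeholders before we record them.
        messages.add(event.getMessage().getFormattedMessage());
    }

    public List<String> getMessages() {
        return messages;
    }
}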
"org.elasticsearch."); private List expectations; - public MockLogAppender() { + public MockLogAppender() throws IllegalAccessException { + super("mock", RegexFilter.createFilter(".*(\n.*)*", new String[0], true, null, null), null); expectations = new ArrayList<>(); } @@ -47,22 +49,12 @@ public class MockLogAppender extends AppenderSkeleton { } @Override - protected void append(LoggingEvent loggingEvent) { + public void append(LogEvent event) { for (LoggingExpectation expectation : expectations) { - expectation.match(loggingEvent); + expectation.match(event); } } - @Override - public void close() { - - } - - @Override - public boolean requiresLayout() { - return false; - } - public void assertAllExpectationsMatched() { for (LoggingExpectation expectation : expectations) { expectation.assertMatched(); @@ -70,7 +62,7 @@ public class MockLogAppender extends AppenderSkeleton { } public interface LoggingExpectation { - void match(LoggingEvent loggingEvent); + void match(LogEvent event); void assertMatched(); } @@ -91,10 +83,10 @@ public class MockLogAppender extends AppenderSkeleton { } @Override - public void match(LoggingEvent event) { - if (event.getLevel() == level && event.getLoggerName().equals(logger)) { + public void match(LogEvent event) { + if (event.getLevel().equals(level) && event.getLoggerName().equals(logger)) { if (Regex.isSimpleMatchPattern(message)) { - if (Regex.simpleMatch(message, event.getMessage().toString())) { + if (Regex.simpleMatch(message, event.getMessage().getFormattedMessage())) { saw = true; } } else { diff --git a/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolSerializationTests.java b/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolSerializationTests.java index 14cf10b8f31..bf0f5f6e606 100644 --- a/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolSerializationTests.java +++ b/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolSerializationTests.java @@ -40,11 +40,8 @@ import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; -/** - * - */ public class ThreadPoolSerializationTests extends ESTestCase { - BytesStreamOutput output = new BytesStreamOutput(); + private final BytesStreamOutput output = new BytesStreamOutput(); private ThreadPool.ThreadPoolType threadPoolType; @Before @@ -54,13 +51,13 @@ public class ThreadPoolSerializationTests extends ESTestCase { } public void testThatQueueSizeSerializationWorks() throws Exception { - ThreadPool.Info info = new ThreadPool.Info("foo", threadPoolType, 1, 10, TimeValue.timeValueMillis(3000), SizeValue.parseSizeValue("10k")); + ThreadPool.Info info = new ThreadPool.Info("foo", threadPoolType, 1, 10, + TimeValue.timeValueMillis(3000), SizeValue.parseSizeValue("10k")); output.setVersion(Version.CURRENT); info.writeTo(output); StreamInput input = output.bytes().streamInput(); - ThreadPool.Info newInfo = new ThreadPool.Info(); - newInfo.readFrom(input); + ThreadPool.Info newInfo = new ThreadPool.Info(input); assertThat(newInfo.getQueueSize().singles(), is(10000L)); } @@ -71,8 +68,7 @@ public class ThreadPoolSerializationTests extends ESTestCase { info.writeTo(output); StreamInput input = output.bytes().streamInput(); - ThreadPool.Info newInfo = new ThreadPool.Info(); - newInfo.readFrom(input); + ThreadPool.Info newInfo = new ThreadPool.Info(input); assertThat(newInfo.getQueueSize(), is(nullValue())); } @@ -103,7 +99,8 @@ public class ThreadPoolSerializationTests extends ESTestCase { } public void 
testThatToXContentWritesInteger() throws Exception { - ThreadPool.Info info = new ThreadPool.Info("foo", threadPoolType, 1, 10, TimeValue.timeValueMillis(3000), SizeValue.parseSizeValue("1k")); + ThreadPool.Info info = new ThreadPool.Info("foo", threadPoolType, 1, 10, + TimeValue.timeValueMillis(3000), SizeValue.parseSizeValue("1k")); XContentBuilder builder = jsonBuilder(); builder.startObject(); info.toXContent(builder, ToXContent.EMPTY_PARAMS); @@ -126,8 +123,7 @@ public class ThreadPoolSerializationTests extends ESTestCase { info.writeTo(output); StreamInput input = output.bytes().streamInput(); - ThreadPool.Info newInfo = new ThreadPool.Info(); - newInfo.readFrom(input); + ThreadPool.Info newInfo = new ThreadPool.Info(input); assertThat(newInfo.getThreadPoolType(), is(threadPoolType)); } diff --git a/core/src/test/resources/indices/bwc/index-2.4.0.zip b/core/src/test/resources/indices/bwc/index-2.4.0.zip new file mode 100644 index 00000000000..b34ca764f79 Binary files /dev/null and b/core/src/test/resources/indices/bwc/index-2.4.0.zip differ diff --git a/core/src/test/resources/indices/bwc/repo-2.4.0.zip b/core/src/test/resources/indices/bwc/repo-2.4.0.zip new file mode 100644 index 00000000000..a86f2b71771 Binary files /dev/null and b/core/src/test/resources/indices/bwc/repo-2.4.0.zip differ diff --git a/core/src/test/resources/org/elasticsearch/common/logging/config/logging.yml b/core/src/test/resources/org/elasticsearch/common/logging/config/logging.yml deleted file mode 100644 index 548b186e46f..00000000000 --- a/core/src/test/resources/org/elasticsearch/common/logging/config/logging.yml +++ /dev/null @@ -1,13 +0,0 @@ -# you can override using a command-line parameter -# -E logger.level=(ERROR|WARN|INFO|DEBUG|TRACE) -logger.level: INFO -rootLogger: ${logger.level}, console -logger: - test: TRACE, console - -appender: - console: - type: console - layout: - type: consolePattern - conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" diff --git a/core/src/test/resources/org/elasticsearch/common/logging/config/test2/logging.yml b/core/src/test/resources/org/elasticsearch/common/logging/config/test2/logging.yml deleted file mode 100644 index 71fbce639a5..00000000000 --- a/core/src/test/resources/org/elasticsearch/common/logging/config/test2/logging.yml +++ /dev/null @@ -1,10 +0,0 @@ -logger: - # log action execution errors for easier debugging - second: DEBUG, console2 - -appender: - console2: - type: console - layout: - type: consolePattern - conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" \ No newline at end of file diff --git a/core/src/test/resources/org/elasticsearch/common/logging/config/test2/test3/logging.yml b/core/src/test/resources/org/elasticsearch/common/logging/config/test2/test3/logging.yml deleted file mode 100644 index edfe0c9ed43..00000000000 --- a/core/src/test/resources/org/elasticsearch/common/logging/config/test2/test3/logging.yml +++ /dev/null @@ -1,10 +0,0 @@ -logger: - # log action execution errors for easier debugging - third: DEBUG, console3 - -appender: - console3: - type: console - layout: - type: consolePattern - conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" \ No newline at end of file diff --git a/distribution/build.gradle b/distribution/build.gradle index 5409cfd3bc6..edf9b6a70cd 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -295,7 +295,7 @@ configure(subprojects.findAll { ['deb', 'rpm'].contains(it.name) }) { that'll happen when createEtc runs. 
*/ outputs.file "${packagingFiles}/etc/elasticsearch/elasticsearch.yml" outputs.file "${packagingFiles}/etc/elasticsearch/jvm.options" - outputs.file "${packagingFiles}/etc/elasticsearch/logging.yml" + outputs.file "${packagingFiles}/etc/elasticsearch/log4j2.properties" } task createPidDir(type: EmptyDirTask) { @@ -375,7 +375,7 @@ configure(subprojects.findAll { ['deb', 'rpm'].contains(it.name) }) { configurationFile '/etc/elasticsearch/elasticsearch.yml' configurationFile '/etc/elasticsearch/jvm.options' - configurationFile '/etc/elasticsearch/logging.yml' + configurationFile '/etc/elasticsearch/log4j2.properties' into('/etc/elasticsearch') { fileMode 0750 permissionGroup 'elasticsearch' @@ -490,6 +490,7 @@ Map expansionsForDistribution(distributionType) { 'path.conf': [ 'tar': '$ES_HOME/config', 'zip': '$ES_HOME/config', + 'integ-test-zip': '$ES_HOME/config', 'def': '/etc/elasticsearch', ], 'path.env': [ diff --git a/distribution/licenses/apache-log4j-extras-1.2.17.jar.sha1 b/distribution/licenses/apache-log4j-extras-1.2.17.jar.sha1 deleted file mode 100644 index 147721b8913..00000000000 --- a/distribution/licenses/apache-log4j-extras-1.2.17.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -85863614d82185d7e51fe21c00aa9117a523a8b6 diff --git a/distribution/licenses/apache-log4j-extras-NOTICE b/distribution/licenses/apache-log4j-extras-NOTICE deleted file mode 100644 index e02b7500ec3..00000000000 --- a/distribution/licenses/apache-log4j-extras-NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -Apache Extras Companion for log4j 1.2. -Copyright 2007 The Apache Software Foundation - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). \ No newline at end of file diff --git a/distribution/licenses/log4j-1.2-api-2.6.2.jar.sha1 b/distribution/licenses/log4j-1.2-api-2.6.2.jar.sha1 new file mode 100644 index 00000000000..5bf4bcab46f --- /dev/null +++ b/distribution/licenses/log4j-1.2-api-2.6.2.jar.sha1 @@ -0,0 +1 @@ +3b4c5a8b734b6a29b2f03380535a48da6284b210 \ No newline at end of file diff --git a/distribution/licenses/log4j-1.2.17.jar.sha1 b/distribution/licenses/log4j-1.2.17.jar.sha1 deleted file mode 100644 index 383110e29f1..00000000000 --- a/distribution/licenses/log4j-1.2.17.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5af35056b4d257e4b64b9e8069c0746e8b08629f diff --git a/distribution/licenses/apache-log4j-extras-LICENSE b/distribution/licenses/log4j-LICENSE.txt similarity index 100% rename from distribution/licenses/apache-log4j-extras-LICENSE rename to distribution/licenses/log4j-LICENSE.txt diff --git a/distribution/licenses/log4j-NOTICE b/distribution/licenses/log4j-NOTICE.txt similarity index 100% rename from distribution/licenses/log4j-NOTICE rename to distribution/licenses/log4j-NOTICE.txt diff --git a/distribution/licenses/log4j-api-2.6.2.jar.sha1 b/distribution/licenses/log4j-api-2.6.2.jar.sha1 new file mode 100644 index 00000000000..e4f9af7497c --- /dev/null +++ b/distribution/licenses/log4j-api-2.6.2.jar.sha1 @@ -0,0 +1 @@ +bd1b74a5d170686362091c7cf596bbc3adf5c09b \ No newline at end of file diff --git a/distribution/licenses/log4j-LICENSE b/distribution/licenses/log4j-api-LICENSE.txt similarity index 100% rename from distribution/licenses/log4j-LICENSE rename to distribution/licenses/log4j-api-LICENSE.txt diff --git a/distribution/licenses/log4j-api-NOTICE.txt b/distribution/licenses/log4j-api-NOTICE.txt new file mode 100644 index 00000000000..03757323600 --- /dev/null +++ b/distribution/licenses/log4j-api-NOTICE.txt @@ -0,0 +1,5 @@ +Apache log4j +Copyright 
2007 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). \ No newline at end of file diff --git a/distribution/licenses/log4j-core-2.6.2.jar.sha1 b/distribution/licenses/log4j-core-2.6.2.jar.sha1 new file mode 100644 index 00000000000..0ac4323411c --- /dev/null +++ b/distribution/licenses/log4j-core-2.6.2.jar.sha1 @@ -0,0 +1 @@ +00a91369f655eb1639c6aece5c5eb5108db18306 \ No newline at end of file diff --git a/distribution/licenses/log4j-core-LICENSE.txt b/distribution/licenses/log4j-core-LICENSE.txt new file mode 100644 index 00000000000..6279e5206de --- /dev/null +++ b/distribution/licenses/log4j-core-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 1999-2005 The Apache Software Foundation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/distribution/licenses/log4j-core-NOTICE.txt b/distribution/licenses/log4j-core-NOTICE.txt new file mode 100644 index 00000000000..03757323600 --- /dev/null +++ b/distribution/licenses/log4j-core-NOTICE.txt @@ -0,0 +1,5 @@ +Apache log4j +Copyright 2007 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). \ No newline at end of file diff --git a/distribution/src/main/resources/config/jvm.options b/distribution/src/main/resources/config/jvm.options index 63eba4789fc..fc352b14668 100644 --- a/distribution/src/main/resources/config/jvm.options +++ b/distribution/src/main/resources/config/jvm.options @@ -63,6 +63,11 @@ -Dio.netty.noUnsafe=true -Dio.netty.noKeySetOptimization=true +# log4j 2 +-Dlog4j.shutdownHookEnabled=false +-Dlog4j2.disable.jmx=true +-Dlog4j.skipJansi=true + ## heap dumps # generate a heap dump when an allocation from the Java heap fails diff --git a/distribution/src/main/resources/config/log4j2.properties b/distribution/src/main/resources/config/log4j2.properties new file mode 100644 index 00000000000..2cfe038cc84 --- /dev/null +++ b/distribution/src/main/resources/config/log4j2.properties @@ -0,0 +1,74 @@ +status = error + +# log action execution errors for easier debugging +logger.action.name = action +logger.action.level = debug + +appender.console.type = Console +appender.console.name = console +appender.console.layout.type = PatternLayout +appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %m%n + +appender.rolling.type = RollingFile +appender.rolling.name = rolling +appender.rolling.fileName = ${sys:es.logs}.log +appender.rolling.layout.type = PatternLayout +appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %.10000m%n +appender.rolling.filePattern = ${sys:es.logs}-%d{yyyy-MM-dd}.log +appender.rolling.policies.type = Policies +appender.rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.rolling.policies.time.interval = 1 +appender.rolling.policies.time.modulate = true + +rootLogger.level = info +rootLogger.appenderRef.console.ref = console +rootLogger.appenderRef.rolling.ref = rolling + +appender.deprecation_rolling.type = RollingFile +appender.deprecation_rolling.name = deprecation_rolling +appender.deprecation_rolling.fileName = ${sys:es.logs}_deprecation.log 
+appender.deprecation_rolling.layout.type = PatternLayout +appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %.10000m%n +appender.deprecation_rolling.filePattern = ${sys:es.logs}_deprecation-%i.log.gz +appender.deprecation_rolling.policies.type = Policies +appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy +appender.deprecation_rolling.policies.size.size = 1GB +appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy +appender.deprecation_rolling.strategy.max = 4 + +logger.deprecation.name = deprecation +logger.deprecation.level = warn +logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling +logger.deprecation.additivity = false + +appender.index_search_slowlog_rolling.type = RollingFile +appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling +appender.index_search_slowlog_rolling.fileName = ${sys:es.logs}_index_search_slowlog.log +appender.index_search_slowlog_rolling.layout.type = PatternLayout +appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %.10000m%n +appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs}_index_search_slowlog-%d{yyyy-MM-dd}.log +appender.index_search_slowlog_rolling.policies.type = Policies +appender.index_search_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.index_search_slowlog_rolling.policies.time.interval = 1 +appender.index_search_slowlog_rolling.policies.time.modulate = true + +logger.index_search_slowlog_rolling.name = index.search.slowlog +logger.index_search_slowlog_rolling.level = trace +logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling +logger.index_search_slowlog_rolling.additivity = false + +appender.index_indexing_slowlog_rolling.type = RollingFile +appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling +appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs}_index_indexing_slowlog.log +appender.index_indexing_slowlog_rolling.layout.type = PatternLayout +appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %.10000m%n +appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs}_index_indexing_slowlog-%d{yyyy-MM-dd}.log +appender.index_indexing_slowlog_rolling.policies.type = Policies +appender.index_indexing_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.index_indexing_slowlog_rolling.policies.time.interval = 1 +appender.index_indexing_slowlog_rolling.policies.time.modulate = true + +logger.index_indexing_slowlog.name = index.indexing.slowlog.index +logger.index_indexing_slowlog.level = trace +logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling +logger.index_indexing_slowlog.additivity = false diff --git a/distribution/src/main/resources/config/logging.yml b/distribution/src/main/resources/config/logging.yml deleted file mode 100644 index 12cac3bd14e..00000000000 --- a/distribution/src/main/resources/config/logging.yml +++ /dev/null @@ -1,86 +0,0 @@ -# you can override using a command-line parameter -# -E logger.level=(ERROR|WARN|INFO|DEBUG|TRACE) -logger.level: INFO -rootLogger: ${logger.level}, console, file -logger: - # log action execution errors for easier debugging - action: DEBUG - - # deprecation logging, turn to INFO to disable them - deprecation: WARN, deprecation_log_file - - # reduce the logging for aws, too much is logged under the default INFO - 
com.amazonaws: WARN - # aws will try to do some sketchy JMX stuff, but its not needed. - com.amazonaws.jmx.SdkMBeanRegistrySupport: ERROR - com.amazonaws.metrics.AwsSdkMetrics: ERROR - - org.apache.http: INFO - - # gateway - #gateway: DEBUG - #index.gateway: DEBUG - - # peer shard recovery - #indices.recovery: DEBUG - - # discovery - #discovery: TRACE - - index.search.slowlog: TRACE, index_search_slow_log_file - index.indexing.slowlog: TRACE, index_indexing_slow_log_file - -additivity: - index.search.slowlog: false - index.indexing.slowlog: false - deprecation: false - -appender: - console: - type: console - layout: - type: consolePattern - conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" - - file: - type: dailyRollingFile - file: ${path.logs}/${cluster.name}.log - datePattern: "'.'yyyy-MM-dd" - layout: - type: pattern - conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %.10000m%n" - - # Use the following log4j-extras RollingFileAppender to enable gzip compression of log files. - # For more information see https://logging.apache.org/log4j/extras/apidocs/org/apache/log4j/rolling/RollingFileAppender.html - #file: - #type: extrasRollingFile - #file: ${path.logs}/${cluster.name}.log - #rollingPolicy: timeBased - #rollingPolicy.FileNamePattern: ${path.logs}/${cluster.name}.log.%d{yyyy-MM-dd}.gz - #layout: - #type: pattern - #conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" - - deprecation_log_file: - type: dailyRollingFile - file: ${path.logs}/${cluster.name}_deprecation.log - datePattern: "'.'yyyy-MM-dd" - layout: - type: pattern - conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" - - index_search_slow_log_file: - type: dailyRollingFile - file: ${path.logs}/${cluster.name}_index_search_slowlog.log - datePattern: "'.'yyyy-MM-dd" - layout: - type: pattern - conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" - - index_indexing_slow_log_file: - type: dailyRollingFile - file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log - datePattern: "'.'yyyy-MM-dd" - layout: - type: pattern - conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" diff --git a/docs/build.gradle b/docs/build.gradle index caf7cfea01e..d930dfb5b60 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -65,18 +65,18 @@ buildRestTests.docs = fileTree(projectDir) { Closure setupTwitter = { String name, int count -> buildRestTests.setups[name] = ''' - do: - indices.create: - index: twitter - body: - settings: - number_of_shards: 1 - number_of_replicas: 1 + indices.create: + index: twitter + body: + settings: + number_of_shards: 1 + number_of_replicas: 1 - do: - bulk: - index: twitter - type: tweet - refresh: true - body: |''' + bulk: + index: twitter + type: tweet + refresh: true + body: |''' for (int i = 0; i < count; i++) { String user, text if (i == 0) { @@ -87,12 +87,13 @@ Closure setupTwitter = { String name, int count -> text = "some message with the number $i" } buildRestTests.setups[name] += """ - {"index":{"_id": "$i"}} - {"user": "$user", "message": "$text", "date": "2009-11-15T14:12:12", "likes": $i}""" + {"index":{"_id": "$i"}} + {"user": "$user", "message": "$text", "date": "2009-11-15T14:12:12", "likes": $i}""" } } setupTwitter('twitter', 5) setupTwitter('big_twitter', 120) +setupTwitter('huge_twitter', 1200) buildRestTests.setups['host'] = ''' # Fetch the http host. We use the host of the master because we know there will always be a master. 
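The new log4j2.properties keys every log file name off the es.logs system property via ${sys:es.logs}, where the deleted logging.yml interpolated ${path.logs}/${cluster.name} itself. A quick sketch of how the configured names expand once es.logs is set; the path is illustrative, and the assumption here is that the launcher sets es.logs to the logs directory plus the cluster name:

public class LogPathSketch {
    public static void main(String[] args) {
        // Assumed launcher behavior: es.logs = <path.logs>/<cluster name>.
        System.setProperty("es.logs", "/var/log/elasticsearch/my-cluster");
        String base = System.getProperty("es.logs");
        System.out.println(base + ".log");                        // appender.rolling
        System.out.println(base + "_deprecation.log");            // deprecation_rolling
        System.out.println(base + "_index_search_slowlog.log");   // search slowlog
        System.out.println(base + "_index_indexing_slowlog.log"); // indexing slowlog
    }
}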
diff --git a/docs/groovy-api/anatomy.asciidoc b/docs/groovy-api/anatomy.asciidoc index 33d8ef72f3d..99e008fb6eb 100644 --- a/docs/groovy-api/anatomy.asciidoc +++ b/docs/groovy-api/anatomy.asciidoc @@ -17,7 +17,7 @@ elasticsearch Java `ActionFuture` (in turn a nicer extension to Java own `Future`) which allows to register listeners (closures) on it for success and failures, as well as blocking for the response. For example: -[source,js] +[source,groovy] -------------------------------------------------- def indexR = client.index { index "test" @@ -38,7 +38,7 @@ println "Indexed $indexR.response.id into $indexR.response.index/$indexR.respons In the above example, calling `indexR.response` will simply block for the response. We can also block for the response for a specific timeout: -[source,js] +[source,groovy] -------------------------------------------------- IndexResponse response = indexR.response "5s" // block for 5 seconds, same as: response = indexR.response 5, TimeValue.SECONDS // @@ -47,7 +47,7 @@ response = indexR.response 5, TimeValue.SECONDS // We can also register closures that will be called on success and on failure: -[source,js] +[source,groovy] -------------------------------------------------- indexR.success = {IndexResponse response -> println "Indexed $response.id into $response.index/$response.type" @@ -65,7 +65,7 @@ This option allows to pass the actual instance of the request (instead of a closure) as a parameter. The rest is similar to the closure as a parameter option (the `GActionFuture` handling). For example: -[source,js] +[source,groovy] -------------------------------------------------- def indexR = client.index (new IndexRequest( index: "test", @@ -90,7 +90,7 @@ The last option is to provide an actual instance of the API request, and an `ActionListener` for the callback. This is exactly like the Java API with the added `gexecute` which returns the `GActionFuture`: -[source,js] +[source,groovy] -------------------------------------------------- def indexR = node.client.prepareIndex("test", "type1", "1").setSource({ test = "value" diff --git a/docs/groovy-api/client.asciidoc b/docs/groovy-api/client.asciidoc index c0a6d688415..a2745f459bd 100644 --- a/docs/groovy-api/client.asciidoc +++ b/docs/groovy-api/client.asciidoc @@ -13,7 +13,7 @@ within the cluster. A Node based client is the simplest form to get a `GClient` to start executing operations against elasticsearch. 
-[source,js] +[source,groovy] -------------------------------------------------- import org.elasticsearch.groovy.client.GClient import org.elasticsearch.groovy.node.GNode @@ -33,7 +33,7 @@ Since elasticsearch allows to configure it using JSON based settings, the configuration itself can be done using a closure that represent the JSON: -[source,js] +[source,groovy] -------------------------------------------------- import org.elasticsearch.groovy.node.GNode import org.elasticsearch.groovy.node.GNodeBuilder diff --git a/docs/groovy-api/delete.asciidoc b/docs/groovy-api/delete.asciidoc index e3320126966..18f11e67775 100644 --- a/docs/groovy-api/delete.asciidoc +++ b/docs/groovy-api/delete.asciidoc @@ -6,7 +6,7 @@ The delete API is very similar to the Java delete API, here is an example: -[source,js] +[source,groovy] -------------------------------------------------- def deleteF = node.client.delete { index "test" diff --git a/docs/groovy-api/get.asciidoc b/docs/groovy-api/get.asciidoc index 6bf476c16a8..824c18f8f11 100644 --- a/docs/groovy-api/get.asciidoc +++ b/docs/groovy-api/get.asciidoc @@ -7,7 +7,7 @@ Java get API. The main benefit of using groovy is handling the source content. It can be automatically converted to a `Map` which means using Groovy to navigate it is simple: -[source,js] +[source,groovy] -------------------------------------------------- def getF = node.client.get { index "test" diff --git a/docs/groovy-api/index_.asciidoc b/docs/groovy-api/index_.asciidoc index cd7f0ca4ac9..b63a212352a 100644 --- a/docs/groovy-api/index_.asciidoc +++ b/docs/groovy-api/index_.asciidoc @@ -7,7 +7,7 @@ Java index API. The Groovy extension to it is the ability to provide the indexed source using a closure. For example: -[source,js] +[source,groovy] -------------------------------------------------- def indexR = client.index { index "test" diff --git a/docs/groovy-api/search.asciidoc b/docs/groovy-api/search.asciidoc index 946760d95cc..d0b74a4d658 100644 --- a/docs/groovy-api/search.asciidoc +++ b/docs/groovy-api/search.asciidoc @@ -7,7 +7,7 @@ Java search API. 
The Groovy extension allows to provide the search source to execute as a `Closure` including the query itself (similar to GORM criteria builder): -[source,js] +[source,groovy] -------------------------------------------------- def search = node.client.search { indices "test" @@ -19,7 +19,7 @@ def search = node.client.search { } } -search.response.hits.each {SearchHit hit -> +search.response.hits.each {SearchHit hit -> println "Got hit $hit.id from $hit.index/$hit.type" } -------------------------------------------------- @@ -27,13 +27,13 @@ search.response.hits.each {SearchHit hit -> It can also be executed using the "Java API" while still using a closure for the query: -[source,js] +[source,groovy] -------------------------------------------------- def search = node.client.prepareSearch("test").setQuery({ term(test: "value") }).gexecute(); -search.response.hits.each {SearchHit hit -> +search.response.hits.each {SearchHit hit -> println "Got hit $hit.id from $hit.index/$hit.type" } -------------------------------------------------- @@ -48,7 +48,7 @@ The format of the search `Closure` follows the same JSON syntax as the Term query where multiple values are provided (see {ref}/query-dsl-terms-query.html[terms]): -[source,js] +[source,groovy] -------------------------------------------------- def search = node.client.search { indices "test" @@ -64,7 +64,7 @@ def search = node.client.search { Query string (see {ref}/query-dsl-query-string-query.html[query string]): -[source,js] +[source,groovy] -------------------------------------------------- def search = node.client.search { indices "test" @@ -82,7 +82,7 @@ def search = node.client.search { Pagination (see {ref}/search-request-from-size.html[from/size]): -[source,js] +[source,groovy] -------------------------------------------------- def search = node.client.search { indices "test" @@ -99,7 +99,7 @@ def search = node.client.search { Sorting (see {ref}/search-request-sort.html[sort]): -[source,js] +[source,groovy] -------------------------------------------------- def search = node.client.search { indices "test" diff --git a/docs/plugins/analysis-icu.asciidoc b/docs/plugins/analysis-icu.asciidoc index b43a10e25b9..a21e0c5c82f 100644 --- a/docs/plugins/analysis-icu.asciidoc +++ b/docs/plugins/analysis-icu.asciidoc @@ -17,7 +17,6 @@ This plugin can be installed using the plugin manager: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin install analysis-icu ---------------------------------------------------------------- -// NOTCONSOLE The plugin must be installed on every node in the cluster, and each node must be restarted after installation. @@ -32,7 +31,6 @@ The plugin can be removed with the following command: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin remove analysis-icu ---------------------------------------------------------------- -// NOTCONSOLE The node must be stopped before removing the plugin. 
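For readers following the Groovy search examples above without the Groovy client: the closure-based query is just a builder for an ordinary JSON search body. A sketch of the equivalent request over HTTP, assuming a local node and the third-party `requests` library:

[source,python]
----
import json
import requests  # third-party HTTP client, assumed installed

# The Groovy closure `term(test: "value")` builds this JSON body.
body = {"query": {"term": {"test": "value"}}}

resp = requests.get("http://localhost:9200/test/_search",
                    data=json.dumps(body),
                    headers={"Content-Type": "application/json"})
for hit in resp.json()["hits"]["hits"]:
    print("Got hit %s from %s/%s" % (hit["_id"], hit["_index"], hit["_type"]))
----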
diff --git a/docs/plugins/analysis-kuromoji.asciidoc b/docs/plugins/analysis-kuromoji.asciidoc index 5899134cec0..90584645bbb 100644 --- a/docs/plugins/analysis-kuromoji.asciidoc +++ b/docs/plugins/analysis-kuromoji.asciidoc @@ -14,7 +14,6 @@ This plugin can be installed using the plugin manager: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin install analysis-kuromoji ---------------------------------------------------------------- -// NOTCONSOLE The plugin must be installed on every node in the cluster, and each node must be restarted after installation. @@ -29,7 +28,6 @@ The plugin can be removed with the following command: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin remove analysis-kuromoji ---------------------------------------------------------------- -// NOTCONSOLE The node must be stopped before removing the plugin. diff --git a/docs/plugins/analysis-phonetic.asciidoc b/docs/plugins/analysis-phonetic.asciidoc index 4fcfcf6caba..34f14abe3c5 100644 --- a/docs/plugins/analysis-phonetic.asciidoc +++ b/docs/plugins/analysis-phonetic.asciidoc @@ -15,7 +15,6 @@ This plugin can be installed using the plugin manager: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin install analysis-phonetic ---------------------------------------------------------------- -// NOTCONSOLE The plugin must be installed on every node in the cluster, and each node must be restarted after installation. @@ -30,7 +29,6 @@ The plugin can be removed with the following command: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin remove analysis-phonetic ---------------------------------------------------------------- -// NOTCONSOLE The node must be stopped before removing the plugin. diff --git a/docs/plugins/analysis-smartcn.asciidoc b/docs/plugins/analysis-smartcn.asciidoc index 665ccbaf611..18a020cf40d 100644 --- a/docs/plugins/analysis-smartcn.asciidoc +++ b/docs/plugins/analysis-smartcn.asciidoc @@ -20,7 +20,6 @@ This plugin can be installed using the plugin manager: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin install analysis-smartcn ---------------------------------------------------------------- -// NOTCONSOLE The plugin must be installed on every node in the cluster, and each node must be restarted after installation. @@ -35,7 +34,6 @@ The plugin can be removed with the following command: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin remove analysis-smartcn ---------------------------------------------------------------- -// NOTCONSOLE The node must be stopped before removing the plugin. diff --git a/docs/plugins/analysis-stempel.asciidoc b/docs/plugins/analysis-stempel.asciidoc index 8a42135a879..88b43a1a805 100644 --- a/docs/plugins/analysis-stempel.asciidoc +++ b/docs/plugins/analysis-stempel.asciidoc @@ -17,7 +17,6 @@ This plugin can be installed using the plugin manager: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin install analysis-stempel ---------------------------------------------------------------- -// NOTCONSOLE The plugin must be installed on every node in the cluster, and each node must be restarted after installation. 
@@ -32,7 +31,6 @@ The plugin can be removed with the following command: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin remove analysis-stempel ---------------------------------------------------------------- -// NOTCONSOLE The node must be stopped before removing the plugin. diff --git a/docs/plugins/discovery-azure-classic.asciidoc b/docs/plugins/discovery-azure-classic.asciidoc index a9ec9929b49..0feb5f7f8e5 100644 --- a/docs/plugins/discovery-azure-classic.asciidoc +++ b/docs/plugins/discovery-azure-classic.asciidoc @@ -17,7 +17,6 @@ This plugin can be installed using the plugin manager: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin install discovery-azure-classic ---------------------------------------------------------------- -// NOTCONSOLE The plugin must be installed on every node in the cluster, and each node must be restarted after installation. @@ -32,7 +31,6 @@ The plugin can be removed with the following command: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin remove discovery-azure-classic ---------------------------------------------------------------- -// NOTCONSOLE The node must be stopped before removing the plugin. @@ -359,7 +357,7 @@ ssh azure-elasticsearch-cluster.cloudapp.net Once connected, install Elasticsearch: -[source,sh] +["source","sh",subs="attributes,callouts"] ---- # Install Latest Java version # Read http://www.webupd8.org/2012/09/install-oracle-java-8-in-ubuntu-via-ppa.html for details @@ -372,36 +370,43 @@ sudo apt-get install oracle-java8-installer # sudo apt-get install openjdk-8-jre-headless # Download Elasticsearch -curl -s https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-2.0.0.deb -o elasticsearch-2.0.0.deb +curl -s https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-{version}.deb -o elasticsearch-{version}.deb # Prepare Elasticsearch installation -sudo dpkg -i elasticsearch-2.0.0.deb +sudo dpkg -i elasticsearch-{version}.deb ---- Check that elasticsearch is running: -[source,sh] +[source,js] ---- -curl http://localhost:9200/ +GET / ---- +// CONSOLE This command should give you a JSON result: -[source,js] ----- +["source","js",subs="attributes,callouts"] +-------------------------------------------- { - "status" : 200, - "name" : "Living Colossus", + "name" : "Cp8oag6", + "cluster_name" : "elasticsearch", "version" : { - "number" : "2.0.0", - "build_hash" : "a46900e9c72c0a623d71b54016357d5f94c8ea32", - "build_timestamp" : "2014-02-12T16:18:34Z", + "number" : "{version}", + "build_hash" : "f27399d", + "build_date" : "2016-03-30T09:51:41.449Z", "build_snapshot" : false, - "lucene_version" : "5.1" + "lucene_version" : "{lucene_version}" }, "tagline" : "You Know, for Search" } ----- +-------------------------------------------- +// TESTRESPONSE[s/"name" : "Cp8oag6",/"name" : "$body.name",/] +// TESTRESPONSE[s/"cluster_name" : "elasticsearch",/"cluster_name" : "$body.cluster_name",/] +// TESTRESPONSE[s/"build_hash" : "f27399d",/"build_hash" : "$body.version.build_hash",/] +// TESTRESPONSE[s/"build_date" : "2016-03-30T09:51:41.449Z",/"build_date" : $body.version.build_date,/] +// TESTRESPONSE[s/"build_snapshot" : false,/"build_snapshot" : $body.version.build_snapshot,/] +// So much s/// but at least we test that the layout is close to matching.... 
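The stack of `TESTRESPONSE[s/.../.../]` lines above rewrites the expected snippet so that environment-specific values match whatever the node actually returns. A rough standalone illustration of that substitution idea (not the actual test framework code):

[source,python]
----
import re

expected = '"name" : "Cp8oag6", "build_hash" : "f27399d",'
# Each (find, replace) pair plays the role of one s/find/replace/.
substitutions = [
    (r'"name" : "Cp8oag6",', '"name" : "$body.name",'),
    (r'"build_hash" : "f27399d",', '"build_hash" : "$body.version.build_hash",'),
]
for find, replace in substitutions:
    expected = re.sub(find, replace, expected)
print(expected)  # placeholders now match any response body
----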
[[discovery-azure-classic-long-plugin]] ===== Install elasticsearch cloud azure plugin diff --git a/docs/plugins/discovery-ec2.asciidoc b/docs/plugins/discovery-ec2.asciidoc index b62c5484905..0803d0a4fcd 100644 --- a/docs/plugins/discovery-ec2.asciidoc +++ b/docs/plugins/discovery-ec2.asciidoc @@ -15,7 +15,6 @@ This plugin can be installed using the plugin manager: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin install discovery-ec2 ---------------------------------------------------------------- -// NOTCONSOLE The plugin must be installed on every node in the cluster, and each node must be restarted after installation. @@ -30,7 +29,6 @@ The plugin can be removed with the following command: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin remove discovery-ec2 ---------------------------------------------------------------- -// NOTCONSOLE The node must be stopped before removing the plugin. @@ -231,6 +229,7 @@ Management Console. It should look similar to this. "Version": "2012-10-17" } ---- +// NOTCONSOLE [[discovery-ec2-filtering]] ===== Filtering by Tags diff --git a/docs/plugins/discovery-gce.asciidoc b/docs/plugins/discovery-gce.asciidoc index aa458d28af4..f615fc9810e 100644 --- a/docs/plugins/discovery-gce.asciidoc +++ b/docs/plugins/discovery-gce.asciidoc @@ -13,7 +13,6 @@ This plugin can be installed using the plugin manager: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin install discovery-gce ---------------------------------------------------------------- -// NOTCONSOLE The plugin must be installed on every node in the cluster, and each node must be restarted after installation. @@ -28,7 +27,6 @@ The plugin can be removed with the following command: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin remove discovery-gce ---------------------------------------------------------------- -// NOTCONSOLE The node must be stopped before removing the plugin. 
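The next hunk moves the GCE troubleshooting docs from `logging.yml` to `log4j2.properties`. As an aside, the same logger can usually be raised at runtime through the cluster settings API instead of editing files on disk; a hedged sketch, assuming a reachable node and the third-party `requests` library:

[source,python]
----
import json
import requests  # third-party HTTP client, assumed installed

# Raise the discovery.gce logger to TRACE without touching
# log4j2.properties; transient settings reset on full cluster restart.
settings = {"transient": {"logger.discovery.gce": "TRACE"}}
resp = requests.put("http://localhost:9200/_cluster/settings",
                    data=json.dumps(settings),
                    headers={"Content-Type": "application/json"})
print(resp.json())
----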
@@ -271,11 +269,11 @@ If anything goes wrong, you should check logs: tail -f /var/log/elasticsearch/elasticsearch.log -------------------------------------------------- -If needed, you can change log level to `TRACE` by opening `logging.yml`: +If needed, you can change log level to `trace` by opening `log4j2.properties`: [source,sh] -------------------------------------------------- -sudo vi /etc/elasticsearch/logging.yml +sudo vi /etc/elasticsearch/log4j2.properties -------------------------------------------------- and adding the following line: @@ -283,7 +281,8 @@ and adding the following line: -[source,yaml] +[source,properties] -------------------------------------------------- # discovery -discovery.gce: TRACE +logger.discovery_gce.name = discovery.gce +logger.discovery_gce.level = trace -------------------------------------------------- diff --git a/docs/plugins/index.asciidoc b/docs/plugins/index.asciidoc index 8a572d5ee3f..128b12f5dba 100644 --- a/docs/plugins/index.asciidoc +++ b/docs/plugins/index.asciidoc @@ -1,7 +1,9 @@ = Elasticsearch Plugins and Integrations -:ref: https://www.elastic.co/guide/en/elasticsearch/reference/master -:guide: https://www.elastic.co/guide +:ref: https://www.elastic.co/guide/en/elasticsearch/reference/master +:guide: https://www.elastic.co/guide +:version: 5.0.0-alpha5 +:lucene_version: 6.2.0 [[intro]] == Introduction to plugins @@ -62,5 +64,3 @@ include::integrations.asciidoc[] include::authors.asciidoc[] include::redirects.asciidoc[] - - diff --git a/docs/plugins/ingest-attachment.asciidoc b/docs/plugins/ingest-attachment.asciidoc index 3c72e514094..65add6ac927 100644 --- a/docs/plugins/ingest-attachment.asciidoc +++ b/docs/plugins/ingest-attachment.asciidoc @@ -21,7 +21,6 @@ This plugin can be installed using the plugin manager: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin install ingest-attachment ---------------------------------------------------------------- -// NOTCONSOLE The plugin must be installed on every node in the cluster, and each node must be restarted after installation. @@ -36,7 +35,6 @@ The plugin can be removed with the following command: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin remove ingest-attachment ---------------------------------------------------------------- -// NOTCONSOLE The node must be stopped before removing the plugin. diff --git a/docs/plugins/ingest-geoip.asciidoc b/docs/plugins/ingest-geoip.asciidoc index d6eced47eca..1626be6c8e6 100644 --- a/docs/plugins/ingest-geoip.asciidoc +++ b/docs/plugins/ingest-geoip.asciidoc @@ -21,7 +21,6 @@ This plugin can be installed using the plugin manager: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin install ingest-geoip ---------------------------------------------------------------- -// NOTCONSOLE The plugin must be installed on every node in the cluster, and each node must be restarted after installation. @@ -36,7 +35,6 @@ The plugin can be removed with the following command: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin remove ingest-geoip ---------------------------------------------------------------- -// NOTCONSOLE The node must be stopped before removing the plugin.
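The new `:version:` and `:lucene_version:` attributes above are what let snippets such as the Azure install commands reference `elasticsearch-{version}.deb` instead of a hard-coded release. A small sketch of how such attribute placeholders expand (illustrative only, not the docs build itself):

[source,python]
----
# Attribute values as added in this diff.
attributes = {"version": "5.0.0-alpha5", "lucene_version": "6.2.0"}

snippet = ("curl -s https://download.elasticsearch.org/elasticsearch/"
           "elasticsearch/elasticsearch-{version}.deb")
for name, value in attributes.items():
    snippet = snippet.replace("{%s}" % name, value)
print(snippet)
----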
diff --git a/docs/plugins/ingest-user-agent.asciidoc b/docs/plugins/ingest-user-agent.asciidoc index 95997a34c10..29903224f39 100644 --- a/docs/plugins/ingest-user-agent.asciidoc +++ b/docs/plugins/ingest-user-agent.asciidoc @@ -16,7 +16,6 @@ This plugin can be installed using the plugin manager: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin install ingest-user-agent ---------------------------------------------------------------- -// NOTCONSOLE The plugin must be installed on every node in the cluster, and each node must be restarted after installation. @@ -31,7 +30,6 @@ The plugin can be removed with the following command: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin remove ingest-user-agent ---------------------------------------------------------------- -// NOTCONSOLE The node must be stopped before removing the plugin. diff --git a/docs/plugins/lang-javascript.asciidoc b/docs/plugins/lang-javascript.asciidoc index 41fbaed3c38..0b8346f2a90 100644 --- a/docs/plugins/lang-javascript.asciidoc +++ b/docs/plugins/lang-javascript.asciidoc @@ -1,6 +1,8 @@ [[lang-javascript]] === JavaScript Language Plugin +deprecated[5.0.0,JavaScript will be replaced by the new scripting language {ref}/modules-scripting-painless.html[`Painless`]] + The JavaScript language plugin enables the use of JavaScript in Elasticsearch scripts, via Mozilla's https://developer.mozilla.org/en-US/docs/Mozilla/Projects/Rhino[Rhino JavaScript] engine. @@ -15,7 +17,6 @@ This plugin can be installed using the plugin manager: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin install lang-javascript ---------------------------------------------------------------- -// NOTCONSOLE The plugin must be installed on every node in the cluster, and each node must be restarted after installation. @@ -30,7 +31,6 @@ The plugin can be removed with the following command: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin remove lang-javascript ---------------------------------------------------------------- -// NOTCONSOLE The node must be stopped before removing the plugin. @@ -145,11 +145,10 @@ JavaScript: First, save this file as `config/scripts/my_script.js` on every node in the cluster: -[source,js] +[source,painless] ---- doc["num"].value * factor ---- -// NOTCONSOLE then use the script as follows: diff --git a/docs/plugins/lang-python.asciidoc b/docs/plugins/lang-python.asciidoc index 96d00d9e2c8..a642e7f65cc 100644 --- a/docs/plugins/lang-python.asciidoc +++ b/docs/plugins/lang-python.asciidoc @@ -1,6 +1,8 @@ [[lang-python]] === Python Language Plugin +deprecated[5.0.0,Python will be replaced by the new scripting language {ref}/modules-scripting-painless.html[`Painless`]] + The Python language plugin enables the use of Python in Elasticsearch scripts, via the http://www.jython.org/[Jython] Java implementation of Python. @@ -14,7 +16,6 @@ This plugin can be installed using the plugin manager: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin install lang-python ---------------------------------------------------------------- -// NOTCONSOLE The plugin must be installed on every node in the cluster, and each node must be restarted after installation. 
@@ -29,7 +30,6 @@ The plugin can be removed with the following command: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin remove lang-python ---------------------------------------------------------------- -// NOTCONSOLE The node must be stopped before removing the plugin. diff --git a/docs/plugins/mapper-attachments.asciidoc b/docs/plugins/mapper-attachments.asciidoc index 1a294cee78a..0e21c6ab705 100644 --- a/docs/plugins/mapper-attachments.asciidoc +++ b/docs/plugins/mapper-attachments.asciidoc @@ -19,7 +19,6 @@ This plugin can be installed using the plugin manager: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin install mapper-attachments ---------------------------------------------------------------- -// NOTCONSOLE The plugin must be installed on every node in the cluster, and each node must be restarted after installation. @@ -34,7 +33,6 @@ The plugin can be removed with the following command: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin remove mapper-attachments ---------------------------------------------------------------- -// NOTCONSOLE The node must be stopped before removing the plugin. diff --git a/docs/plugins/mapper-murmur3.asciidoc b/docs/plugins/mapper-murmur3.asciidoc index f81c226d1f0..28b7a2387ef 100644 --- a/docs/plugins/mapper-murmur3.asciidoc +++ b/docs/plugins/mapper-murmur3.asciidoc @@ -15,7 +15,6 @@ This plugin can be installed using the plugin manager: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin install mapper-murmur3 ---------------------------------------------------------------- -// NOTCONSOLE The plugin must be installed on every node in the cluster, and each node must be restarted after installation. @@ -30,7 +29,6 @@ The plugin can be removed with the following command: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin remove mapper-murmur3 ---------------------------------------------------------------- -// NOTCONSOLE The node must be stopped before removing the plugin. diff --git a/docs/plugins/mapper-size.asciidoc b/docs/plugins/mapper-size.asciidoc index 4b2d02a6a2b..3c0277895d2 100644 --- a/docs/plugins/mapper-size.asciidoc +++ b/docs/plugins/mapper-size.asciidoc @@ -15,7 +15,6 @@ This plugin can be installed using the plugin manager: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin install mapper-size ---------------------------------------------------------------- -// NOTCONSOLE The plugin must be installed on every node in the cluster, and each node must be restarted after installation. @@ -30,7 +29,6 @@ The plugin can be removed with the following command: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin remove mapper-size ---------------------------------------------------------------- -// NOTCONSOLE The node must be stopped before removing the plugin. 
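The plugin hunks above all repeat the same install/remove commands; when provisioning several nodes it is common to script them. A sketch, assuming it runs from the Elasticsearch home directory with sufficient privileges:

[source,python]
----
import subprocess

# Each call is equivalent to: sudo bin/elasticsearch-plugin install <name>
PLUGINS = ["analysis-icu", "ingest-geoip", "mapper-size"]

for name in PLUGINS:
    subprocess.run(["bin/elasticsearch-plugin", "install", name], check=True)
----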
diff --git a/docs/plugins/plugin-script.asciidoc b/docs/plugins/plugin-script.asciidoc index 6c39975560f..f8fb8814129 100644 --- a/docs/plugins/plugin-script.asciidoc +++ b/docs/plugins/plugin-script.asciidoc @@ -15,7 +15,6 @@ Run the following command to get usage instructions: ----------------------------------- sudo bin/elasticsearch-plugin -h ----------------------------------- -// NOTCONSOLE [IMPORTANT] .Running as root @@ -42,7 +41,6 @@ Core Elasticsearch plugins can be installed as follows: ----------------------------------- sudo bin/elasticsearch-plugin install [plugin_name] ----------------------------------- -// NOTCONSOLE For instance, to install the core <<analysis-icu,ICU Analysis plugin>>, just run the following command: @@ -51,7 +49,6 @@ following command: ----------------------------------- sudo bin/elasticsearch-plugin install analysis-icu ----------------------------------- -// NOTCONSOLE This command will install the version of the plugin that matches your Elasticsearch version and also show a progress bar while downloading. @@ -65,7 +62,6 @@ A plugin can also be downloaded directly from a custom location by specifying th ----------------------------------- sudo bin/elasticsearch-plugin install [url] <1> ----------------------------------- -// NOTCONSOLE <1> must be a valid URL, the plugin name is determined from its descriptor. For instance, to install a plugin from your local file system, you could run: @@ -74,7 +70,6 @@ For instance, to install a plugin from your local file system, you could run: ----------------------------------- sudo bin/elasticsearch-plugin install file:///path/to/plugin.zip ----------------------------------- -// NOTCONSOLE The plugin script will refuse to talk to an HTTPS URL with an untrusted certificate. To use a self-signed HTTPS cert, you will need to add the CA cert to a local Java truststore and pass the location to the script as follows: @@ -84,7 +79,6 @@ to a local Java truststore and pass the location to the script as follows: ----------------------------------- sudo ES_JAVA_OPTS="-Djavax.net.ssl.trustStore=/path/to/trustStore.jks" bin/elasticsearch-plugin install https://.... ----------------------------------- -// NOTCONSOLE [[listing-removing]] === Listing and Removing Installed Plugins @@ -98,7 +92,6 @@ A list of the currently loaded plugins can be retrieved with the `list` option: ----------------------------------- sudo bin/elasticsearch-plugin list ----------------------------------- -// NOTCONSOLE Alternatively, use the {ref}/cluster-nodes-info.html[node-info API] to find out which plugins are installed on each node in the cluster @@ -113,7 +106,6 @@ Plugins can be removed manually, by deleting the appropriate directory under ----------------------------------- sudo bin/elasticsearch-plugin remove [pluginname] ----------------------------------- -// NOTCONSOLE After a Java plugin has been removed, you will need to restart the node to complete the removal process. @@ -145,7 +137,6 @@ can do this as follows: --------------------- sudo bin/elasticsearch-plugin -Epath.conf=/path/to/custom/config/dir install <plugin name> --------------------- -// NOTCONSOLE You can also set the `CONF_DIR` environment variable to the custom config directory path.
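As an alternative to `elasticsearch-plugin list`, the node-info API mentioned above reports the plugins loaded on each node of the cluster; a sketch against a local node, assuming the third-party `requests` library:

[source,python]
----
import requests  # third-party HTTP client, assumed installed

# GET /_nodes/plugins returns, per node, the plugins it has loaded.
info = requests.get("http://localhost:9200/_nodes/plugins").json()
for node in info["nodes"].values():
    print(node["name"], [p["name"] for p in node.get("plugins", [])])
----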
@@ -168,7 +159,6 @@ sudo bin/elasticsearch-plugin install analysis-icu --timeout 1m # Wait forever (default) sudo bin/elasticsearch-plugin install analysis-icu --timeout 0 ----------------------------------- -// NOTCONSOLE [float] === Proxy settings @@ -181,7 +171,6 @@ and `http.proxyPort` (or `https.proxyHost` and `https.proxyPort`): ----------------------------------- sudo ES_JAVA_OPTS="-Dhttp.proxyHost=host_name -Dhttp.proxyPort=port_number -Dhttps.proxyHost=host_name -Dhttps.proxyPort=https_port_number" bin/elasticsearch-plugin install analysis-icu ----------------------------------- -// NOTCONSOLE Or on Windows: @@ -190,7 +179,6 @@ Or on Windows: set ES_JAVA_OPTS="-DproxyHost=host_name -DproxyPort=port_number -Dhttps.proxyHost=host_name -Dhttps.proxyPort=https_port_number" bin/elasticsearch-plugin install analysis-icu ------------------------------------ -// NOTCONSOLE === Plugins directory diff --git a/docs/plugins/repository-azure.asciidoc b/docs/plugins/repository-azure.asciidoc index 726f55cc889..03466f0c643 100644 --- a/docs/plugins/repository-azure.asciidoc +++ b/docs/plugins/repository-azure.asciidoc @@ -14,7 +14,6 @@ This plugin can be installed using the plugin manager: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin install repository-azure ---------------------------------------------------------------- -// NOTCONSOLE The plugin must be installed on every node in the cluster, and each node must be restarted after installation. @@ -29,7 +28,6 @@ The plugin can be removed with the following command: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin remove repository-azure ---------------------------------------------------------------- -// NOTCONSOLE The node must be stopped before removing the plugin. diff --git a/docs/plugins/repository-gcs.asciidoc b/docs/plugins/repository-gcs.asciidoc index a9658e1f219..d0a5c748439 100644 --- a/docs/plugins/repository-gcs.asciidoc +++ b/docs/plugins/repository-gcs.asciidoc @@ -14,7 +14,6 @@ This plugin can be installed using the plugin manager: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin install repository-gcs ---------------------------------------------------------------- -// NOTCONSOLE NOTE: The plugin requires new permission to be installed in order to work @@ -31,7 +30,6 @@ The plugin can be removed with the following command: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin remove repository-gcs ---------------------------------------------------------------- -// NOTCONSOLE The node must be stopped before removing the plugin. @@ -133,6 +131,7 @@ A service account file looks like this: "client_x509_cert_url": "..." } ---- +// NOTCONSOLE This file must be copied in the `config` directory of the elasticsearch installation and on every node of the cluster. diff --git a/docs/plugins/repository-hdfs.asciidoc b/docs/plugins/repository-hdfs.asciidoc index 02239a78b15..62b1d2a95ca 100644 --- a/docs/plugins/repository-hdfs.asciidoc +++ b/docs/plugins/repository-hdfs.asciidoc @@ -14,7 +14,6 @@ This plugin can be installed through the plugin manager: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin install repository-hdfs ---------------------------------------------------------------- -// NOTCONSOLE The plugin must be installed on _every_ node in the cluster, and each node must be restarted after installation. 
@@ -29,7 +28,6 @@ The plugin can be removed by specifying the _installed_ package: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin remove repository-hdfs ---------------------------------------------------------------- -// NOTCONSOLE The node must be stopped before removing the plugin. diff --git a/docs/plugins/repository-s3.asciidoc b/docs/plugins/repository-s3.asciidoc index cf9d3248287..5848b827c63 100644 --- a/docs/plugins/repository-s3.asciidoc +++ b/docs/plugins/repository-s3.asciidoc @@ -16,7 +16,6 @@ This plugin can be installed using the plugin manager: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin install repository-s3 ---------------------------------------------------------------- -// NOTCONSOLE The plugin must be installed on every node in the cluster, and each node must be restarted after installation. @@ -31,7 +30,6 @@ The plugin can be removed with the following command: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin remove repository-s3 ---------------------------------------------------------------- -// NOTCONSOLE The node must be stopped before removing the plugin. @@ -303,6 +301,7 @@ IAM in conjunction with pre-existing S3 buckets. Here is an example policy which "Version": "2012-10-17" } ---- +// NOTCONSOLE You may further restrict the permissions by specifying a prefix within the bucket, in this example, named "foo". @@ -346,6 +345,7 @@ You may further restrict the permissions by specifying a prefix within the bucke "Version": "2012-10-17" } ---- +// NOTCONSOLE The bucket needs to exist to register a repository for snapshots. If you did not create the bucket then the repository registration will fail. If you want elasticsearch to create the bucket instead, you can add the permission to create a @@ -363,6 +363,7 @@ specific bucket like this: ] } ---- +// NOTCONSOLE [[repository-s3-endpoint]] ===== Using other S3 endpoint diff --git a/docs/plugins/store-smb.asciidoc b/docs/plugins/store-smb.asciidoc index ac35342f2f8..731894ae0a8 100644 --- a/docs/plugins/store-smb.asciidoc +++ b/docs/plugins/store-smb.asciidoc @@ -13,7 +13,6 @@ This plugin can be installed using the plugin manager: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin install store-smb ---------------------------------------------------------------- -// NOTCONSOLE The plugin must be installed on every node in the cluster, and each node must be restarted after installation. @@ -28,7 +27,6 @@ The plugin can be removed with the following command: ---------------------------------------------------------------- sudo bin/elasticsearch-plugin remove store-smb ---------------------------------------------------------------- -// NOTCONSOLE The node must be stopped before removing the plugin. diff --git a/docs/python/index.asciidoc b/docs/python/index.asciidoc index d81a20ea392..8440ed1cd0d 100644 --- a/docs/python/index.asciidoc +++ b/docs/python/index.asciidoc @@ -35,7 +35,6 @@ It can be installed with pip: ------------------------------------ pip install elasticsearch ------------------------------------ -// NOTCONSOLE === Versioning @@ -49,6 +48,7 @@ later, 0.4 releases are meant to work with Elasticsearch 0.90.*. 
The recommended way to set your requirements in your `setup.py` or `requirements.txt` is: +[source,txt] ------------------------------------ # Elasticsearch 2.x elasticsearch>=2.0.0,<3.0.0 diff --git a/docs/reference/aggregations/bucket/histogram-aggregation.asciidoc b/docs/reference/aggregations/bucket/histogram-aggregation.asciidoc index b54a288b587..0336e21c2ea 100644 --- a/docs/reference/aggregations/bucket/histogram-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/histogram-aggregation.asciidoc @@ -227,12 +227,14 @@ a multi-value metrics aggregation, and in case of a single-value metrics aggrega The path must be defined in the following form: +// https://en.wikipedia.org/wiki/Extended_Backus%E2%80%93Naur_Form +[source,ebnf] -------------------------------------------------- -AGG_SEPARATOR := '>' -METRIC_SEPARATOR := '.' -AGG_NAME := <the name of the aggregation> -METRIC := <the name of the metric (in case of multi-value metrics aggregation)> -PATH := <AGG_NAME>[<AGG_SEPARATOR><AGG_NAME>]*[<METRIC_SEPARATOR><METRIC>] +AGG_SEPARATOR = '>' ; +METRIC_SEPARATOR = '.' ; +AGG_NAME = <the name of the aggregation> ; +METRIC = <the name of the metric (in case of multi-value metrics aggregation)> ; +PATH = <AGG_NAME> [ <AGG_SEPARATOR>, <AGG_NAME> ]* [ <METRIC_SEPARATOR>, <METRIC> ] ; -------------------------------------------------- [source,js] diff --git a/docs/reference/aggregations/bucket/terms-aggregation.asciidoc b/docs/reference/aggregations/bucket/terms-aggregation.asciidoc index 2f130be11a2..68b2e8511f9 100644 --- a/docs/reference/aggregations/bucket/terms-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/terms-aggregation.asciidoc @@ -344,12 +344,14 @@ a multi-value metrics aggregation, and in case of a single-value metrics aggrega The path must be defined in the following form: +// https://en.wikipedia.org/wiki/Extended_Backus%E2%80%93Naur_Form +[source,ebnf] -------------------------------------------------- -AGG_SEPARATOR := '>' -METRIC_SEPARATOR := '.' -AGG_NAME := <the name of the aggregation> -METRIC := <the name of the metric (in case of multi-value metrics aggregation)> -PATH := <AGG_NAME>[<AGG_SEPARATOR><AGG_NAME>]*[<METRIC_SEPARATOR><METRIC>] +AGG_SEPARATOR = '>' ; +METRIC_SEPARATOR = '.' ; +AGG_NAME = <the name of the aggregation> ; +METRIC = <the name of the metric (in case of multi-value metrics aggregation)> ; +PATH = <AGG_NAME> [ <AGG_SEPARATOR>, <AGG_NAME> ]* [ <METRIC_SEPARATOR>, <METRIC> ] ; -------------------------------------------------- [source,js] diff --git a/docs/reference/aggregations/metrics/cardinality-aggregation.asciidoc b/docs/reference/aggregations/metrics/cardinality-aggregation.asciidoc index 73d7f3c26bb..77fc7dfcd5a 100644 --- a/docs/reference/aggregations/metrics/cardinality-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/cardinality-aggregation.asciidoc @@ -71,6 +71,7 @@ The following chart shows how the error varies before and after the threshold: //// To generate this chart use this gnuplot script: +[source,gnuplot] ------- #!/usr/bin/gnuplot reset @@ -95,6 +96,7 @@ plot "test.dat" using 1:2 title "threshold=100", \ and generate data in a 'test.dat' file using the below Java code: +[source,java] ------- private static double error(HyperLogLogPlusPlus h, long expected) { double actual = h.cardinality(0); @@ -140,7 +142,7 @@ counting millions of items. On string fields that have a high cardinality, it might be faster to store the hash of your field values in your index and then run the cardinality aggregation on this field. This can either be done by providing hash values from client-side -or by letting elasticsearch compute hash values for you by using the +or by letting elasticsearch compute hash values for you by using the {plugins}/mapper-murmur3.html[`mapper-murmur3`] plugin.
NOTE: Pre-computing hashes is usually only useful on very large and/or diff --git a/docs/reference/aggregations/pipeline.asciidoc b/docs/reference/aggregations/pipeline.asciidoc index 499438256e6..c9cd14d6203 100644 --- a/docs/reference/aggregations/pipeline.asciidoc +++ b/docs/reference/aggregations/pipeline.asciidoc @@ -34,12 +34,14 @@ will be included in the final output. Most pipeline aggregations require another aggregation as their input. The input aggregation is defined via the `buckets_path` parameter, which follows a specific format: +// https://en.wikipedia.org/wiki/Extended_Backus%E2%80%93Naur_Form +[source,ebnf] -------------------------------------------------- -AGG_SEPARATOR := '>' -METRIC_SEPARATOR := '.' -AGG_NAME := <the name of the aggregation> -METRIC := <the name of the metric (in case of multi-value metrics aggregation)> -PATH := <AGG_NAME>[<AGG_SEPARATOR><AGG_NAME>]*[<METRIC_SEPARATOR><METRIC>] +AGG_SEPARATOR = '>' ; +METRIC_SEPARATOR = '.' ; +AGG_NAME = <the name of the aggregation> ; +METRIC = <the name of the metric (in case of multi-value metrics aggregation)> ; +PATH = <AGG_NAME> [ <AGG_SEPARATOR>, <AGG_NAME> ]* [ <METRIC_SEPARATOR>, <METRIC> ] ; -------------------------------------------------- For example, the path `"my_bucket>my_stats.avg"` will path to the `avg` value in the `"my_stats"` metric, which is diff --git a/docs/reference/cluster/allocation-explain.asciidoc b/docs/reference/cluster/allocation-explain.asciidoc index ec223722c68..769a8a4e10d 100644 --- a/docs/reference/cluster/allocation-explain.asciidoc +++ b/docs/reference/cluster/allocation-explain.asciidoc @@ -5,6 +5,8 @@ The cluster allocation explanation API is designed to assist in answering the question "why is this shard unassigned?". To explain the allocation (on unassigned state) of a shard, issue a request like: +experimental[The cluster allocation explain API is new and should still be considered experimental. The API may change in ways that are not backwards compatible] + [source,js] -------------------------------------------------- $ curl -XGET 'http://localhost:9200/_cluster/allocation/explain' -d'{ diff --git a/docs/reference/cluster/health.asciidoc b/docs/reference/cluster/health.asciidoc index 4bd4abb37f2..dc73b4408e6 100644 --- a/docs/reference/cluster/health.asciidoc +++ b/docs/reference/cluster/health.asciidoc @@ -13,6 +13,8 @@ GET _cluster/health // TEST[s/^/PUT test1\n/] Returns this: + +[source,js] -------------------------------------------------- { "cluster_name" : "testcluster", diff --git a/docs/reference/cluster/stats.asciidoc b/docs/reference/cluster/stats.asciidoc index 4758ea2b0c4..a95273f85f9 100644 --- a/docs/reference/cluster/stats.asciidoc +++ b/docs/reference/cluster/stats.asciidoc @@ -116,7 +116,17 @@ Will return, for example: "name": "Mac OS X", "count": 1 } - ] + ], + "mem" : { + "total" : "16gb", + "total_in_bytes" : 17179869184, + "free" : "78.1mb", + "free_in_bytes" : 81960960, + "used" : "15.9gb", + "used_in_bytes" : 17097908224, + "free_percent" : 0, + "used_percent" : 100 + } }, "process": { "cpu": { diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc index cdb2b8b0ffb..a53ddc34dac 100644 --- a/docs/reference/docs/reindex.asciidoc +++ b/docs/reference/docs/reindex.asciidoc @@ -424,7 +424,7 @@ supports `refresh`, `wait_for_completion`, `wait_for_active_shards`, `timeout`, Sending the `refresh` url parameter will cause all indexes to which the request wrote to be refreshed. This is different than the Index API's `refresh` -parameter which causes just the shard that received the new data to be indexed. +parameter which causes just the shard that received the new data to be refreshed.
If the request contains `wait_for_completion=false` then Elasticsearch will perform some preflight checks, launch the request, and then return a `task` diff --git a/docs/reference/how-to/indexing-speed.asciidoc b/docs/reference/how-to/indexing-speed.asciidoc index bb5a367a04c..50187af5b28 100644 --- a/docs/reference/how-to/indexing-speed.asciidoc +++ b/docs/reference/how-to/indexing-speed.asciidoc @@ -67,6 +67,15 @@ The filesystem cache will be used in order to buffer I/O operations. You should make sure to give at least half the memory of the machine running elasticsearch to the filesystem cache. +[float] +=== Use auto-generated ids + +When indexing a document that has an explicit id, elasticsearch needs to check +whether a document with the same id already exists within the same shard, which +is a costly operation and gets even more costly as the index grows. By using +auto-generated ids, Elasticsearch can skip this check, which makes indexing +faster. + [float] === Use faster hardware diff --git a/docs/reference/how-to/search-speed.asciidoc b/docs/reference/how-to/search-speed.asciidoc index 67848c9edca..2d0525a48e8 100644 --- a/docs/reference/how-to/search-speed.asciidoc +++ b/docs/reference/how-to/search-speed.asciidoc @@ -140,6 +140,124 @@ being mapped as <<keyword,`keyword`>> rather than `integer` or `long`. In general, scripts should be avoided. If they are absolutely needed, you should prefer the `painless` and `expressions` engines. +[float] +=== Search rounded dates + +Queries on date fields that use `now` are typically not cacheable since the +range that is being matched changes all the time. However switching to a +rounded date is often acceptable in terms of user experience, and has the +benefit of making better use of the query cache. + +For instance the below query: + +[source,js] +-------------------------------------------------- +PUT index/type/1 +{ + "my_date": "2016-05-11T16:30:55.328Z" +} + +GET index/_search +{ + "query": { + "constant_score": { + "filter": { + "range": { + "my_date": { + "gte": "now-1h", + "lte": "now" + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE + +could be replaced with the following query: + +[source,js] +-------------------------------------------------- +GET index/_search +{ + "query": { + "constant_score": { + "filter": { + "range": { + "my_date": { + "gte": "now-1h/m", + "lte": "now/m" + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +In that case we rounded to the minute, so if the current time is `16:31:29`, +the range query will match everything whose value of the `my_date` field is +between `15:31:00` and `16:31:59`. And if several users run a query that +contains this range in the same minute, the query cache could help speed things +up a bit. The longer the interval that is used for rounding, the more the query +cache can help, but beware that too aggressive rounding might also hurt user +experience.
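To make the rounding arithmetic concrete, this sketch computes the window that `gte: now-1h/m, lte: now/m` matches for the example timestamp; date math rounds `gte` down and `lte` up to the edge of the unit:

[source,python]
----
from datetime import datetime, timedelta

# With now = 16:31:29, `now-1h/m` rounds down to 15:31:00.000 and
# `now/m` rounds up to 16:31:59.999, matching the text above.
now = datetime(2016, 5, 11, 16, 31, 29)
gte = (now - timedelta(hours=1)).replace(second=0, microsecond=0)
lte = now.replace(second=59, microsecond=999999)
print(gte.isoformat(), "->", lte.isoformat())
----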
+ + +NOTE: It might be tempting to split ranges into a large cacheable part and +smaller not cacheable parts in order to be able to leverage the query cache, +as shown below: + +[source,js] +-------------------------------------------------- +GET index/_search +{ + "query": { + "constant_score": { + "filter": { + "bool": { + "should": [ + { + "range": { + "my_date": { + "gte": "now-1h", + "lte": "now-1h/m" + } + } + }, + { + "range": { + "my_date": { + "gt": "now-1h/m", + "lt": "now/m" + } + } + }, + { + "range": { + "my_date": { + "gte": "now/m", + "lte": "now" + } + } + } + ] + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +However such practice might make the query run slower in some cases since the +overhead introduced by the `bool` query may defeat the savings from better +leveraging the query cache. + [float] === Force-merge read-only indices diff --git a/docs/reference/images/lambda_calc.png b/docs/reference/images/lambda_calc.png index 2d7f8bbb8db..4fd19a2660f 100644 Binary files a/docs/reference/images/lambda_calc.png and b/docs/reference/images/lambda_calc.png differ diff --git a/docs/reference/images/sigma_calc.png b/docs/reference/images/sigma_calc.png index d794c0a69d0..9001bbe9eaf 100644 Binary files a/docs/reference/images/sigma_calc.png and b/docs/reference/images/sigma_calc.png differ diff --git a/docs/reference/index-modules/slowlog.asciidoc b/docs/reference/index-modules/slowlog.asciidoc index 217a55fb0b9..423968bb071 100644 --- a/docs/reference/index-modules/slowlog.asciidoc +++ b/docs/reference/index-modules/slowlog.asciidoc @@ -40,17 +40,25 @@ of the actual execution on the specific machine, compared with request level. The logging file is configured by default using the following -configuration (found in `logging.yml`): +configuration (found in `log4j2.properties`): [source,yaml] -------------------------------------------------- -index_search_slow_log_file: - type: dailyRollingFile - file: ${path.logs}/${cluster.name}_index_search_slowlog.log - datePattern: "'.'yyyy-MM-dd" - layout: - type: pattern - conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" +appender.index_search_slowlog_rolling.type = RollingFile +appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling +appender.index_search_slowlog_rolling.fileName = ${sys:es.logs}_index_search_slowlog.log +appender.index_search_slowlog_rolling.layout.type = PatternLayout +appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %.10000m%n +appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs}_index_search_slowlog-%d{yyyy-MM-dd}.log +appender.index_search_slowlog_rolling.policies.type = Policies +appender.index_search_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.index_search_slowlog_rolling.policies.time.interval = 1 +appender.index_search_slowlog_rolling.policies.time.modulate = true + +logger.index_search_slowlog_rolling.name = index.search.slowlog +logger.index_search_slowlog_rolling.level = trace +logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling +logger.index_search_slowlog_rolling.additivity = false -------------------------------------------------- [float] diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index 036bd59ad64..427ef0a84dc 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -1,15 +1,16 @@ [[elasticsearch-reference]] = Elasticsearch Reference 
-:version: 5.0.0-alpha5 -:major-version: 5.x -:branch: master -:jdk: 1.8.0_73 -:defguide: https://www.elastic.co/guide/en/elasticsearch/guide/master -:plugins: https://www.elastic.co/guide/en/elasticsearch/plugins/master -:javaclient: https://www.elastic.co/guide/en/elasticsearch/client/java-api/master/ -:issue: https://github.com/elastic/elasticsearch/issues/ -:pull: https://github.com/elastic/elasticsearch/pull/ +:version: 5.0.0-alpha5 +:major-version: 5.x +:lucene_version: 6.2.0 +:branch: master +:jdk: 1.8.0_73 +:defguide: https://www.elastic.co/guide/en/elasticsearch/guide/master +:plugins: https://www.elastic.co/guide/en/elasticsearch/plugins/master +:javaclient: https://www.elastic.co/guide/en/elasticsearch/client/java-api/master/ +:issue: https://github.com/elastic/elasticsearch/issues/ +:pull: https://github.com/elastic/elasticsearch/pull/ include::getting-started.asciidoc[] @@ -52,7 +53,3 @@ include::glossary.asciidoc[] include::release-notes.asciidoc[] include::redirects.asciidoc[] - - - - diff --git a/docs/reference/indices/analyze.asciidoc b/docs/reference/indices/analyze.asciidoc index ee8b856ef41..dbb2c8f101a 100644 --- a/docs/reference/indices/analyze.asciidoc +++ b/docs/reference/indices/analyze.asciidoc @@ -43,13 +43,13 @@ curl -XGET 'localhost:9200/_analyze' -d ' curl -XGET 'localhost:9200/_analyze' -d ' { "tokenizer" : "keyword", - "token_filter" : ["lowercase"], + "filter" : ["lowercase"], "char_filter" : ["html_strip"], "text" : "this is a test" }' -------------------------------------------------- -deprecated[5.0.0, Use `filter`/`token_filter`/`char_filter` instead of `filters`/`token_filters`/`char_filters`] +deprecated[5.0.0, Use `filter`/`char_filter` instead of `filters`/`char_filters` and `token_filters` has been removed] Custom tokenizers, token filters, and character filters can be specified in the request body as follows: @@ -112,7 +112,7 @@ provided it doesn't start with `{` : [source,js] -------------------------------------------------- -curl -XGET 'localhost:9200/_analyze?tokenizer=keyword&token_filter=lowercase&char_filter=html_strip' -d 'this is a test' +curl -XGET 'localhost:9200/_analyze?tokenizer=keyword&filter=lowercase&char_filter=html_strip' -d 'this is a test' -------------------------------------------------- === Explain Analyze @@ -181,4 +181,5 @@ The request returns the following result: } } -------------------------------------------------- +// TESTRESPONSE <1> Output only "keyword" attribute, since specify "attributes" in the request. diff --git a/docs/reference/indices/clearcache.asciidoc b/docs/reference/indices/clearcache.asciidoc index 8ebb9e3488a..6a7240dc958 100644 --- a/docs/reference/indices/clearcache.asciidoc +++ b/docs/reference/indices/clearcache.asciidoc @@ -6,8 +6,10 @@ associated with one or more indices. [source,js] -------------------------------------------------- -$ curl -XPOST 'http://localhost:9200/twitter/_cache/clear' +POST /twitter/_cache/clear -------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] The API, by default, will clear all caches. Specific caches can be cleaned explicitly by setting `query`, `fielddata` or `request`. @@ -24,8 +26,9 @@ call, or even on `_all` the indices. 
[source,js] -------------------------------------------------- -$ curl -XPOST 'http://localhost:9200/kimchy,elasticsearch/_cache/clear' +POST /kimchy,elasticsearch/_cache/clear -$ curl -XPOST 'http://localhost:9200/_cache/clear' +POST /_cache/clear -------------------------------------------------- - +// CONSOLE +// TEST[s/^/PUT kimchy\nPUT elasticsearch\n/] diff --git a/docs/reference/indices/delete-index.asciidoc b/docs/reference/indices/delete-index.asciidoc index 5c652accfb9..bc057e155d0 100644 --- a/docs/reference/indices/delete-index.asciidoc +++ b/docs/reference/indices/delete-index.asciidoc @@ -5,8 +5,10 @@ The delete index API allows to delete an existing index. [source,js] -------------------------------------------------- -$ curl -XDELETE 'http://localhost:9200/twitter/' +DELETE /twitter -------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] The above example deletes an index called `twitter`. Specifying an index, alias or wildcard expression is required. diff --git a/docs/reference/indices/forcemerge.asciidoc b/docs/reference/indices/forcemerge.asciidoc index a33b7fdfe2c..26baf214176 100644 --- a/docs/reference/indices/forcemerge.asciidoc +++ b/docs/reference/indices/forcemerge.asciidoc @@ -12,8 +12,10 @@ block until the previous force merge is complete. [source,js] -------------------------------------------------- -$ curl -XPOST 'http://localhost:9200/twitter/_forcemerge' +POST /twitter/_forcemerge -------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] [float] [[forcemerge-parameters]] @@ -45,7 +47,9 @@ even on `_all` the indices. [source,js] -------------------------------------------------- -$ curl -XPOST 'http://localhost:9200/kimchy,elasticsearch/_forcemerge' +POST /kimchy,elasticsearch/_forcemerge -$ curl -XPOST 'http://localhost:9200/_forcemerge' +POST /_forcemerge -------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT kimchy\nPUT elasticsearch\n/] diff --git a/docs/reference/indices/get-field-mapping.asciidoc b/docs/reference/indices/get-field-mapping.asciidoc index 39667dc0874..224e74605f4 100644 --- a/docs/reference/indices/get-field-mapping.asciidoc +++ b/docs/reference/indices/get-field-mapping.asciidoc @@ -9,8 +9,10 @@ The following returns the mapping of the field `text` only: [source,js] -------------------------------------------------- -curl -XGET 'http://localhost:9200/twitter/_mapping/tweet/field/text' +GET /twitter/_mapping/tweet/field/message -------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] -For which the response is (assuming `text` is a default string field): +For which the response is (assuming `message` is a default string field): @@ -18,18 +20,28 @@ For which the response is (assuming `text` is a default string field): { "twitter": { - "tweet": { - "text": { - "full_name": "text", - "mapping": { - "text": { "type": "text" } + "mappings": { + "tweet": { + "message": { + "full_name": "message", + "mapping": { + "message": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + } + } } } } } } -------------------------------------------------- - +// TESTRESPONSE [float] @@ -44,12 +56,15 @@ following are some examples: [source,js] -------------------------------------------------- -curl -XGET 'http://localhost:9200/twitter,kimchy/_mapping/field/message' +GET /twitter,kimchy/_mapping/field/message -curl -XGET 'http://localhost:9200/_all/_mapping/tweet,book/field/message,user.id' +GET
/_all/_mapping/tweet,book/field/message,user.id -curl -XGET 'http://localhost:9200/_all/_mapping/tw*/field/*.id' +GET /_all/_mapping/tw*/field/*.id -------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] +// TEST[s/^/PUT kimchy\nPUT book\n/] [float] === Specifying fields diff --git a/docs/reference/indices/get-index.asciidoc b/docs/reference/indices/get-index.asciidoc index b82bee05630..772318c71d8 100644 --- a/docs/reference/indices/get-index.asciidoc +++ b/docs/reference/indices/get-index.asciidoc @@ -1,12 +1,14 @@ [[indices-get-index]] == Get Index -The get index API allows to retrieve information about one or more indexes. +The get index API allows to retrieve information about one or more indexes. [source,js] -------------------------------------------------- -$ curl -XGET 'http://localhost:9200/twitter/' +GET /twitter -------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] The above example gets the information for an index called `twitter`. Specifying an index, alias or wildcard expression is required. @@ -17,13 +19,15 @@ all indices by using `_all` or `*` as index. [float] === Filtering index information -The information returned by the get API can be filtered to include only specific features +The information returned by the get API can be filtered to include only specific features by specifying a comma delimited list of features in the URL: [source,js] -------------------------------------------------- -$ curl -XGET 'http://localhost:9200/twitter/_settings,_mappings' +GET twitter/_settings,_mappings -------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] The above command will only return the settings and mappings for the index called `twitter`. diff --git a/docs/reference/indices/get-mapping.asciidoc b/docs/reference/indices/get-mapping.asciidoc index 317a708f13a..c3580917d9a 100644 --- a/docs/reference/indices/get-mapping.asciidoc +++ b/docs/reference/indices/get-mapping.asciidoc @@ -6,8 +6,10 @@ index/type. 
[source,js] -------------------------------------------------- -curl -XGET 'http://localhost:9200/twitter/_mapping/tweet' +GET /twitter/_mapping/tweet -------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] [float] === Multiple Indices and Types @@ -21,17 +23,21 @@ following are some examples: [source,js] -------------------------------------------------- -curl -XGET 'http://localhost:9200/_mapping/twitter,kimchy' +GET /_mapping/tweet,kimchy -curl -XGET 'http://localhost:9200/_all/_mapping/tweet,book' +GET /_all/_mapping/tweet,book -------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] If you want to get mappings of all indices and types then the following two examples are equivalent: [source,js] -------------------------------------------------- -curl -XGET 'http://localhost:9200/_all/_mapping' +GET /_all/_mapping -curl -XGET 'http://localhost:9200/_mapping' +GET /_mapping -------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] diff --git a/docs/reference/indices/get-settings.asciidoc b/docs/reference/indices/get-settings.asciidoc index 4689c448b56..60d7a75a861 100644 --- a/docs/reference/indices/get-settings.asciidoc +++ b/docs/reference/indices/get-settings.asciidoc @@ -5,8 +5,10 @@ The get settings API allows to retrieve settings of index/indices: [source,js] -------------------------------------------------- -$ curl -XGET 'http://localhost:9200/twitter/_settings' +GET /twitter/_settings -------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] [float] === Multiple Indices and Types @@ -20,12 +22,15 @@ Wildcard expressions are also supported. The following are some examples: [source,js] -------------------------------------------------- -curl -XGET 'http://localhost:9200/twitter,kimchy/_settings' +GET /twitter,kimchy/_settings -curl -XGET 'http://localhost:9200/_all/_settings' +GET /_all/_settings -curl -XGET 'http://localhost:9200/2013-*/_settings' +GET /log_2013_*/_settings -------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] +// TEST[s/^/PUT kimchy\nPUT log_2013_01_01\n/] [float] === Filtering settings by name diff --git a/docs/reference/indices/open-close.asciidoc b/docs/reference/indices/open-close.asciidoc index afdab7bedaf..59f36112b4e 100644 --- a/docs/reference/indices/open-close.asciidoc +++ b/docs/reference/indices/open-close.asciidoc @@ -12,10 +12,12 @@ example: [source,js] -------------------------------------------------- -curl -XPOST 'localhost:9200/my_index/_close' +POST /my_index/_close -curl -XPOST 'localhost:9200/my_index/_open' +POST /my_index/_open -------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT my_index\n/] It is possible to open and close multiple indices. An error will be thrown if the request explicitly refers to a missing index. 
This behaviour can be diff --git a/docs/reference/indices/recovery.asciidoc b/docs/reference/indices/recovery.asciidoc index c4aabac3ac3..448c423d0b6 100644 --- a/docs/reference/indices/recovery.asciidoc +++ b/docs/reference/indices/recovery.asciidoc @@ -8,15 +8,19 @@ For example, the following command would show recovery information for the indic [source,js] -------------------------------------------------- -curl -XGET http://localhost:9200/index1,index2/_recovery +GET index1,index2/_recovery?human -------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT index1\nPUT index2\n/] To see cluster-wide recovery status simply leave out the index names. [source,js] -------------------------------------------------- -curl -XGET http://localhost:9200/_recovery?pretty&human +GET /_recovery?human -------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT index1\n{"settings": {"index.number_of_shards": 1}}\n/] Response: [source,js] @@ -30,7 +34,7 @@ Response: "primary" : true, "start_time" : "2014-02-24T12:15:59.716", "start_time_in_millis": 1393244159716, - "total_time" : "2.9m" + "total_time" : "2.9m", "total_time_in_millis" : 175576, "source" : { "repository" : "my_repository", @@ -45,7 +49,7 @@ Response: }, "index" : { "size" : { - "total" : "75.4mb" + "total" : "75.4mb", "total_in_bytes" : 79063092, "reused" : "0b", "reused_in_bytes" : 0, @@ -68,7 +72,7 @@ Response: "percent" : "100.0%", "total_on_start" : 0, "total_time" : "0s", - "total_time_in_millis" : 0 + "total_time_in_millis" : 0, }, "start" : { "check_index_time" : "0s", @@ -80,6 +84,7 @@ Response: } } -------------------------------------------------- +// We should really assert that this is up to date but that is hard! The above response shows a single index recovering a single shard. In this case, the source of the recovery is a snapshot repository and the target of the recovery is the node with name "my_es_node". @@ -90,7 +95,7 @@ In some cases a higher level of detail may be preferable. Setting "detailed=true [source,js] -------------------------------------------------- -curl -XGET http://localhost:9200/_recovery?pretty&human&detailed=true +GET _recovery?human&detailed=true -------------------------------------------------- Response: @@ -170,6 +175,7 @@ Response: } } -------------------------------------------------- +// We should really assert that this is up to date but that is hard! This response shows a detailed listing (truncated for brevity) of the actual files recovered and their sizes. diff --git a/docs/reference/indices/refresh.asciidoc b/docs/reference/indices/refresh.asciidoc index bbc1f20f409..1e27ace3625 100644 --- a/docs/reference/indices/refresh.asciidoc +++ b/docs/reference/indices/refresh.asciidoc @@ -9,8 +9,10 @@ refresh is scheduled periodically. [source,js] -------------------------------------------------- -$ curl -XPOST 'http://localhost:9200/twitter/_refresh' +POST /twitter/_refresh -------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] [float] === Multi Index @@ -20,7 +22,9 @@ call, or even on `_all` the indices. 
[source,js]
--------------------------------------------------
-$ curl -XPOST 'http://localhost:9200/kimchy,elasticsearch/_refresh'
+POST /kimchy,elasticsearch/_refresh

-$ curl -XPOST 'http://localhost:9200/_refresh'
+POST /_refresh
--------------------------------------------------
+// CONSOLE
+// TEST[s/^/PUT kimchy\nPUT elasticsearch\n/]
diff --git a/docs/reference/indices/rollover-index.asciidoc b/docs/reference/indices/rollover-index.asciidoc
index b12d93bb4b8..6ee28e7b2f4 100644
--- a/docs/reference/indices/rollover-index.asciidoc
+++ b/docs/reference/indices/rollover-index.asciidoc
@@ -19,7 +19,9 @@ PUT /logs-000001 <1>
  }
}

-POST logs_write/_rollover <2>
+# Add > 1000 documents to logs-000001
+
+POST /logs_write/_rollover <2>
{
  "conditions": {
    "max_age": "7d",
@@ -28,6 +30,8 @@ POST logs_write/_rollover <2>
}
--------------------------------------------------
// CONSOLE
+// TEST[setup:huge_twitter]
+// TEST[s/# Add > 1000 documents to logs-000001/POST _reindex?refresh\n{"source":{"index":"twitter"},"dest":{"index":"logs-000001"}}/]
<1> Creates an index called `logs-000001` with the alias `logs_write`.
<2> If the index pointed to by `logs_write` was created 7 or more days ago, or
contains 1,000 or more documents, then the `logs-000002` index is created
@@ -38,6 +42,8 @@ The above request might return the following response:

[source,js]
--------------------------------------------------
{
+  "acknowledged": true,
+  "shards_acknowledged": true,
  "old_index": "logs-000001",
  "new_index": "logs-000002",
  "rolled_over": true, <1>
@@ -48,9 +54,10 @@ The above request might return the following response:
  }
}
--------------------------------------------------
- <1> Whether the index was rolled over.
- <2> Whether the rollover was dry run.
- <3> The result of each condition.
+// TESTRESPONSE
+<1> Whether the index was rolled over.
+<2> Whether the rollover was dry run.
+<3> The result of each condition.

[float]
=== Naming the new index

@@ -65,9 +72,16 @@ the new index as follows:

[source,js]
--------------------------------------------------
-POST my_alias/_rollover/my_new_index_name
-{...}
+POST /my_alias/_rollover/my_new_index_name
+{
+  "conditions": {
+    "max_age": "7d",
+    "max_docs": 1000
+  }
+}
--------------------------------------------------
+// CONSOLE
+// TEST[s/^/PUT my_old_index_name\nPUT my_old_index_name\/_alias\/my_alias\n/]

[float]
=== Defining the new index

@@ -75,7 +89,7 @@ POST my_alias/_rollover/my_new_index_name
The settings, mappings, and aliases for the new index are taken from any
matching <>. Additionally, you can specify
`settings`, `mappings`, and `aliases` in the body of the request, just like the
-<> API. Values specified in the request
+<> API. Values specified in the request
override any values set in matching index templates.
For example, the following `rollover` request overrides the `index.number_of_shards` setting: @@ -88,14 +102,14 @@ PUT /logs-000001 } } -POST logs_write/_rollover +POST /logs_write/_rollover { "conditions" : { "max_age": "7d", "max_docs": 1000 }, - "settings": { - "index.number_of_shards": 2 + "settings": { + "index.number_of_shards": 2 } } -------------------------------------------------- @@ -116,7 +130,7 @@ PUT /logs-000001 } } -POST logs_write/_rollover?dry_run +POST /logs_write/_rollover?dry_run { "conditions" : { "max_age": "7d", @@ -129,6 +143,6 @@ POST logs_write/_rollover?dry_run [float] === Wait For Active Shards -Because the rollover operation creates a new index to rollover to, the -<> setting on +Because the rollover operation creates a new index to rollover to, the +<> setting on index creation applies to the rollover action as well. diff --git a/docs/reference/indices/shrink-index.asciidoc b/docs/reference/indices/shrink-index.asciidoc index 713f2149547..027cf8b924d 100644 --- a/docs/reference/indices/shrink-index.asciidoc +++ b/docs/reference/indices/shrink-index.asciidoc @@ -41,6 +41,8 @@ PUT /my_source_index/_settings } } -------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT my_source_index\n/] <1> Forces the relocation of a copy of each shard to the node with name `shrink_node_name`. See <> for more options. @@ -62,6 +64,8 @@ the following request: -------------------------------------------------- POST my_source_index/_shrink/my_target_index -------------------------------------------------- +// CONSOLE +// TEST[continued] The above request returns immediately once the target index has been added to the cluster state -- it doesn't wait for the shrink operation to start. @@ -105,6 +109,8 @@ POST my_source_index/_shrink/my_target_index } } -------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT my_source_index\n{"settings": {"index.blocks.write": true}}\n/] <1> The number of shards in the target index. This must be a factor of the number of shards in the source index. @@ -139,6 +145,6 @@ replicas and may decide to relocate the primary shard to another node. [float] === Wait For Active Shards -Because the shrink operation creates a new index to shrink the shards to, -the <> setting +Because the shrink operation creates a new index to shrink the shards to, +the <> setting on index creation applies to the shrink index action as well. diff --git a/docs/reference/indices/stats.asciidoc b/docs/reference/indices/stats.asciidoc index e990a7ff6bd..a95b1c81ae1 100644 --- a/docs/reference/indices/stats.asciidoc +++ b/docs/reference/indices/stats.asciidoc @@ -10,15 +10,18 @@ all indices: [source,js] -------------------------------------------------- -curl localhost:9200/_stats +GET /_stats -------------------------------------------------- +// CONSOLE Specific index stats can be retrieved using: [source,js] -------------------------------------------------- -curl localhost:9200/index1,index2/_stats +GET /index1,index2/_stats -------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT index1\nPUT index2\n/] By default, all stats are returned, returning only specific stats can be specified as well in the URI. 
Those stats can be any of: @@ -74,12 +77,14 @@ Here are some samples: [source,js] -------------------------------------------------- # Get back stats for merge and refresh only for all indices -curl 'localhost:9200/_stats/merge,refresh' +GET /_stats/merge,refresh # Get back stats for type1 and type2 documents for the my_index index -curl 'localhost:9200/my_index/_stats/indexing?types=type1,type2 +GET /my_index/_stats/indexing?types=type1,type2 # Get back just search stats for group1 and group2 -curl 'localhost:9200/_stats/search?groups=group1,group2 +GET /_stats/search?groups=group1,group2 -------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT my_index\n/] The stats returned are aggregated on the index level, with `primaries` and `total` aggregations, where `primaries` are the values for only the @@ -91,4 +96,3 @@ Note, as shards move around the cluster, their stats will be cleared as they are created on other nodes. On the other hand, even though a shard "left" a node, that node will still retain the stats that shard contributed to. - diff --git a/docs/reference/indices/templates.asciidoc b/docs/reference/indices/templates.asciidoc index 754a93e3096..6e2f7ce91f1 100644 --- a/docs/reference/indices/templates.asciidoc +++ b/docs/reference/indices/templates.asciidoc @@ -38,6 +38,7 @@ PUT _template/template_1 } -------------------------------------------------- // CONSOLE +// TESTSETUP Defines a template named template_1, with a template pattern of `te*`. The settings and mappings will be applied to any index name that matches @@ -47,7 +48,7 @@ It is also possible to include aliases in an index template as follows: [source,js] -------------------------------------------------- -curl -XPUT localhost:9200/_template/template_1 -d ' +PUT _template/template_1 { "template" : "te*", "settings" : { @@ -64,8 +65,9 @@ curl -XPUT localhost:9200/_template/template_1 -d ' "{index}-alias" : {} <1> } } -' -------------------------------------------------- +// CONSOLE +// TEST[s/^/DELETE _template\/template_1\n/] <1> the `{index}` placeholder within the alias name will be replaced with the actual index name that the template gets applied to during index creation. @@ -79,8 +81,9 @@ Index templates are identified by a name (in the above case [source,js] -------------------------------------------------- -curl -XDELETE localhost:9200/_template/template_1 +DELETE /_template/template_1 -------------------------------------------------- +// CONSOLE [float] [[getting]] @@ -91,24 +94,26 @@ Index templates are identified by a name (in the above case [source,js] -------------------------------------------------- -curl -XGET localhost:9200/_template/template_1 +GET /_template/template_1 -------------------------------------------------- +// CONSOLE You can also match several templates by using wildcards like: [source,js] -------------------------------------------------- -curl -XGET localhost:9200/_template/temp* -curl -XGET localhost:9200/_template/template_1,template_2 +GET /_template/temp* +GET /_template/template_1,template_2 -------------------------------------------------- +// CONSOLE To get list of all index templates you can run: [source,js] -------------------------------------------------- -curl -XGET localhost:9200/_template/ +GET /_template -------------------------------------------------- - +// CONSOLE [float] [[indices-templates-exists]] @@ -118,13 +123,13 @@ Used to check if the template exists or not. 
For example:

[source,js]
-----------------------------------------------
-curl -XHEAD -i localhost:9200/_template/template_1
+HEAD _template/template_1
-----------------------------------------------
+// CONSOLE

The HTTP status code indicates if the template with the given name
exists or not. A status code `200` means it exists, a `404` it does not.
-
[float]
[[multiple-templates]]
=== Multiple Template Matching

@@ -137,7 +142,7 @@ orders overriding them. For example:

[source,js]
--------------------------------------------------
-curl -XPUT localhost:9200/_template/template_1 -d '
+PUT /_template/template_1
{
    "template" : "*",
    "order" : 0,
@@ -150,9 +155,8 @@ curl -XPUT localhost:9200/_template/template_1 -d '
        }
    }
}
-'

-curl -XPUT localhost:9200/_template/template_2 -d '
+PUT /_template/template_2
{
    "template" : "te*",
    "order" : 1,
@@ -165,8 +169,9 @@ curl -XPUT localhost:9200/_template/template_2 -d '
        }
    }
}
-'
--------------------------------------------------
+// CONSOLE
+// TEST[s/^/DELETE _template\/template_1\n/]

The above will disable storing the `_source` on all `type1` types, but
for indices that start with `te*`, source will still be enabled.
diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc
index f0c0e9f6c13..a06e3b9e1df 100644
--- a/docs/reference/ingest/ingest-node.asciidoc
+++ b/docs/reference/ingest/ingest-node.asciidoc
@@ -1495,3 +1495,115 @@ Converts a string to its uppercase equivalent.
  }
}
--------------------------------------------------
+
+[[dot-expand-processor]]
+=== Dot Expander Processor
+
+Expands a field with dots into an object field. This processor allows fields
+with dots in the name to be accessible by other processors in the pipeline.
+Otherwise these fields can't be accessed by any processor.
+
+[[dot-expander-options]]
+.Dot Expander Options
+[options="header"]
+|======
+| Name     | Required | Default | Description
+| `field`  | yes      | -       | The field to expand into an object field
+| `path`   | no       | -       | The field that contains the field to expand. Only required if the field to expand is part of another object field, because the `field` option can only understand leaf fields.
+|======
+
+[source,js]
+--------------------------------------------------
+{
+  "dot_expander": {
+    "field": "foo.bar"
+  }
+}
+--------------------------------------------------
+
+For example, the dot expander processor would turn this document:
+
+[source,js]
+--------------------------------------------------
+{
+  "foo.bar" : "value"
+}
+--------------------------------------------------
+
+into:
+
+[source,js]
+--------------------------------------------------
+{
+  "foo" : {
+    "bar" : "value"
+  }
+}
+--------------------------------------------------
+
+If there is already a `bar` field nested under `foo` then
+this processor merges the `foo.bar` field into it. If the field is
+a scalar value then it will turn that field into an array field.
+
+For example, the following document:
+
+[source,js]
+--------------------------------------------------
+{
+  "foo.bar" : "value2",
+  "foo" : {
+    "bar" : "value1"
+  }
+}
+--------------------------------------------------
+
+is transformed by the `dot_expander` processor into:
+
+[source,js]
+--------------------------------------------------
+{
+  "foo" : {
+    "bar" : ["value1", "value2"]
+  }
+}
+--------------------------------------------------
+
+If any field outside of the leaf field conflicts with a pre-existing field of the same name,
+then that field needs to be renamed first.
+
+Consider the following document:
+
+[source,js]
+--------------------------------------------------
+{
+  "foo": "value1",
+  "foo.bar": "value2"
+}
+--------------------------------------------------
+
+Then the `foo` field needs to be renamed first, before the `dot_expander`
+processor is applied. So in order for the `foo.bar` field to properly
+be expanded into the `bar` field under the `foo` field, the following
+pipeline should be used:
+
+[source,js]
+--------------------------------------------------
+{
+  "processors" : [
+    {
+      "rename" : {
+        "field" : "foo",
+        "target_field" : "foo.bar"
+      }
+    },
+    {
+      "dot_expander": {
+        "field": "foo.bar"
+      }
+    }
+  ]
+}
+--------------------------------------------------
+
+The reason for this is that Ingest doesn't know how to automatically cast
+a scalar field to an object field.
\ No newline at end of file
diff --git a/docs/reference/mapping/params/geohash-prefix.asciidoc b/docs/reference/mapping/params/geohash-prefix.asciidoc
index 33bd21bdeb6..51dfc829947 100644
--- a/docs/reference/mapping/params/geohash-prefix.asciidoc
+++ b/docs/reference/mapping/params/geohash-prefix.asciidoc
@@ -41,6 +41,7 @@ PUT my_index
  }
}
--------------------------------------------------
+// CONSOLE
// TEST[warning:geo_point geohash_precision parameter is deprecated and will be removed in the next major release]
// TEST[warning:geo_point geohash_prefix parameter is deprecated and will be removed in the next major release]
// TEST[warning:geo_point geohash parameter is deprecated and will be removed in the next major release]
diff --git a/docs/reference/mapping/params/lat-lon.asciidoc b/docs/reference/mapping/params/lat-lon.asciidoc
index 002f91ef3f3..234c652c932 100644
--- a/docs/reference/mapping/params/lat-lon.asciidoc
+++ b/docs/reference/mapping/params/lat-lon.asciidoc
@@ -31,6 +31,7 @@ PUT my_index
  }
}
--------------------------------------------------
+// CONSOLE
// TEST[warning:geo_point lat_lon parameter is deprecated and will be removed in the next major release]
<1> Setting `lat_lon` to true indexes the geo-point in the `location.lat` and
`location.lon` fields.
diff --git a/docs/reference/migration/migrate_5_0/index-apis.asciidoc b/docs/reference/migration/migrate_5_0/index-apis.asciidoc
index 8e51366c4d5..c8f939c259d 100644
--- a/docs/reference/migration/migrate_5_0/index-apis.asciidoc
+++ b/docs/reference/migration/migrate_5_0/index-apis.asciidoc
@@ -64,3 +64,8 @@ The `/_aliases` API no longer supports `indexRouting` and `index-routing`, only
`index_routing`. It also no longer supports `searchRouting` and `search-routing`,
only `search_routing`. These were removed because they were untested and we
prefer there to be only one (obvious) way to do things like this.
+
+==== OpType Create without an ID
+
+As of 5.0, indexing a document with `op_type=create` without specifying an ID is
+no longer supported.
diff --git a/docs/reference/migration/migrate_5_0/java.asciidoc b/docs/reference/migration/migrate_5_0/java.asciidoc
index 5eee1eccd86..e0f7fba26b0 100644
--- a/docs/reference/migration/migrate_5_0/java.asciidoc
+++ b/docs/reference/migration/migrate_5_0/java.asciidoc
@@ -67,12 +67,13 @@ client.prepareSearch(indices).suggest(new SuggestBuilder().addSuggestion("foo",

==== Elasticsearch will no longer detect logging implementations

-Elasticsearch now logs only to log4j 1.2. Previously if log4j wasn't on the
-classpath it made some effort to degrade to slf4j or java.util.logging. Now it
-will fail to work without the log4j 1.2 api. The log4j-over-slf4j bridge ought
-to work when using the java client, as should log4j 2's log4j-1.2-api. The
-Elasticsearch server now only supports log4j as configured by `logging.yml`
-and will fail if log4j isn't present.
+Elasticsearch now logs using Log4j 2. Previously if Log4j wasn't on the
+classpath it made some effort to degrade to SLF4J or Java logging. Now it will
+fail to work without the Log4j 2 API. The log4j-over-slf4j bridge ought to work
+when using the Java client. The log4j-1.2-api bridge is used for third-party
+dependencies that still use the Log4j 1 API. The Elasticsearch server now only
+supports Log4j 2 as configured by `log4j2.properties` and will fail if Log4j
+isn't present.

==== Groovy dependencies

@@ -348,7 +349,9 @@ The `setQuery(BytesReference)` method have been removed in favor of using `setQu

==== ClusterStatsResponse

Removed the `getMemoryAvailable` method from `OsStats`, which could be previously accessed calling
-`clusterStatsResponse.getNodesStats().getOs().getMemoryAvailable()`.
+`clusterStatsResponse.getNodesStats().getOs().getMemoryAvailable()`. It is now replaced with
+`clusterStatsResponse.getNodesStats().getOs().getMem()` which exposes `getTotal()`, `getFree()`,
+`getUsed()`, `getFreePercent()` and `getUsedPercent()`.

==== setRefresh(boolean) has been removed

diff --git a/docs/reference/migration/migrate_5_0/percolator.asciidoc b/docs/reference/migration/migrate_5_0/percolator.asciidoc
index f173a0df958..fa51e72c0ff 100644
--- a/docs/reference/migration/migrate_5_0/percolator.asciidoc
+++ b/docs/reference/migration/migrate_5_0/percolator.asciidoc
@@ -24,6 +24,10 @@ Instead a <> must be configured prior to index
Indices with a `.percolator` type created on a version before 5.0.0 can still be used,
but new indices no longer accept the `.percolator` type.

+However it is strongly recommended to reindex any indices containing percolator queries created prior
+to upgrading to Elasticsearch 5. By doing this the `percolate` query can make use of the terms that the
+`percolator` field type extracted from the percolator queries and potentially execute many times faster.
+
==== Percolate document mapping

The `percolate` query no longer modifies the mappings. Before the percolate API
@@ -53,6 +57,22 @@ The percolate stats have been removed. This is because the percolator no longer

The percolator no longer accepts percolator queries containing `range` queries
with ranges that are based on current time (using `now`).

+==== Percolator queries containing scripts
+
+Percolator queries that contain scripts (for example, a `script` query or a `function_score` query script function) that
+have no explicit language specified will use the Painless scripting language from version 5.0 and up.
+
+Scripts with no explicit language set in percolator queries stored in indices created prior to version 5.0
+will use the language that has been configured in the `script.legacy.default_lang` setting. This setting defaults to
+the Groovy scripting language, which was the default for versions prior to 5.0. If your default scripting language was
+different then set the `script.legacy.default_lang` setting to the language you used before.
+
+In order to make use of the new `percolator` field type all percolator queries should be reindexed into a new index.
+When reindexing percolator queries with scripts that have no explicit language defined into a new index, one of the
+following two things should be done in order to make the scripts work:
+
+* (Recommended approach) While reindexing the percolator documents, migrate the scripts to the Painless scripting language.
+* Or add the `lang` parameter to the script and set it to the language these scripts were written in.
+
==== Java client

The percolator is no longer part of the core elasticsearch dependency. It has moved to the percolator module.
diff --git a/docs/reference/migration/migrate_5_0/rest.asciidoc b/docs/reference/migration/migrate_5_0/rest.asciidoc
index 278acd52c43..afba8819ed3 100644
--- a/docs/reference/migration/migrate_5_0/rest.asciidoc
+++ b/docs/reference/migration/migrate_5_0/rest.asciidoc
@@ -29,9 +29,9 @@ document exists in an index. The old endpoint will keep working until 6.0.

==== Removed `mem` section from `/_cluster/stats` response

-The `mem` section contained only one value, the total memory available
-throughout all nodes in the cluster. The section was removed as it didn't
-prove useful.
+The `mem` section contained only the `total` value, which was actually the
+memory available throughout all nodes in the cluster. The section now contains
+`total`, `free`, `used`, `used_percent` and `free_percent`.

==== Revised node roles aggregate returned by `/_cluster/stats`

@@ -67,8 +67,8 @@ removed in Elasticsearch 6.0.0.

==== Analyze API changes

-The deprecated `filters`/`token_filters`/`char_filters` parameter has been
-renamed `filter`/`token_filter`/`char_filter`.
+The `filters` and `char_filters` parameters have been renamed `filter` and `char_filter`.
+The `token_filters` parameter has been removed. Use `filter` instead.

==== `DELETE /_query` endpoint removed

diff --git a/docs/reference/migration/migrate_5_0/scripting.asciidoc b/docs/reference/migration/migrate_5_0/scripting.asciidoc
index 4d42ae98b46..3e0db9e1cbd 100644
--- a/docs/reference/migration/migrate_5_0/scripting.asciidoc
+++ b/docs/reference/migration/migrate_5_0/scripting.asciidoc
@@ -9,8 +9,6 @@ to help make the transition between languages as simple as possible.

Documentation for Painless can be found at <>

-It is also possible to set the default language back to Groovy using the following setting: `script.default_lang: groovy`
-
One common difference to note between Groovy and Painless is the use of
parameters -- all parameters in Painless must be prefixed with `params.` now.
The following example shows the difference:
@@ -48,6 +46,12 @@ Painless (`my_modifer` is prefixed with `params`):
}
-----------------------------------

+The `script.default_lang` setting has been removed. It is no longer possible to set the default scripting language. If a
+language other than `painless` is used, then it must be explicitly specified on the script itself.
+
+For scripts with no explicit language defined, that are part of already stored percolator queries, the default language
+can be controlled with the `script.legacy.default_lang` setting.
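To make the second option concrete, here is a minimal sketch (an editor's illustration, not part of this change; the `views` field is hypothetical) of a percolator-style query whose script names its language explicitly instead of relying on `script.legacy.default_lang`:

[source,js]
--------------------------------------------------
GET /_search
{
  "query": {
    "function_score": {
      "script_score": {
        "script": {
          "lang": "groovy",
          "inline": "doc['views'].value * 2"
        }
      }
    }
  }
}
--------------------------------------------------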
+
==== Removed 1.x script and template syntax

The deprecated 1.x syntax of defining inline scripts / templates and referring to file or index base scripts / templates
diff --git a/docs/reference/migration/migrate_5_0/suggest.asciidoc b/docs/reference/migration/migrate_5_0/suggest.asciidoc
index 0b67711fe00..6979e8718e3 100644
--- a/docs/reference/migration/migrate_5_0/suggest.asciidoc
+++ b/docs/reference/migration/migrate_5_0/suggest.asciidoc
@@ -3,7 +3,8 @@

The completion suggester has undergone a complete rewrite. This means that the
syntax and data structure for fields of type `completion` have changed, as
-have the syntax and response of completion suggester requests.
+have the syntax and response of completion suggester requests. See
+<> for details.

For indices created before Elasticsearch 5.0.0, `completion` fields and the
completion suggester will continue to work as they did in Elasticsearch 2.x.
@@ -25,35 +26,17 @@ to suggestion entries for both context and completion suggesters.

==== Completion suggester is document-oriented

-Suggestions are aware of the document they belong to. This enables
-retrieving any field value from the document. This is exposed
-through the query-time `payload` option in `completion` and `context`
-suggesters:
+Suggestions are aware of the document they belong to. Now, associated
+documents (`_source`) are returned as part of `completion` suggestions.

-[source,sh]
---------------
-GET /my_index/_search
-{
-  "suggest": {
-    "fooSuggestion": {
-       "text": "f"
-       "completion": {
-         "field": "fooSuggest",
-         "payload": ["field1", "field2"]
-       }
-    }
-  }
-}
---------------
+IMPORTANT: The `_source` meta-field must be enabled, which is the default behavior,
+for `_source` to be returned with suggestions.

Previously, `context` and `completion` suggesters supported an index-time
`payloads` option, which was used to store and return metadata with suggestions.

-Now metadata can be stored as a field in the same document as the
-suggestion for enabling retrieval at query-time. The support for
-index-time `payloads` has been removed to avoid bloating the in-memory
-index with suggestion metadata. The time that it takes to retrieve payloads
-depends heavily on the size of the `_source` field. The smaller the `_source`,
-the faster the retrieval.
+Now metadata can be stored as part of the same document as the
+suggestion for retrieval at query-time. The support for index-time `payloads`
+has been removed to avoid bloating the in-memory index with suggestion metadata.

==== Simpler completion indexing

diff --git a/docs/reference/modules/scripting.asciidoc b/docs/reference/modules/scripting.asciidoc
index 76065210569..253fab3941b 100644
--- a/docs/reference/modules/scripting.asciidoc
+++ b/docs/reference/modules/scripting.asciidoc
@@ -5,12 +5,7 @@ The scripting module enables you to use scripts to evaluate custom
expressions. For example, you could use a script to return "script fields"
as part of a search request or evaluate a custom score for a query.

-TIP: Elasticsearch now has a built-in scripting language called _Painless_
-that provides a more secure alternative for implementing
-scripts for Elasticsearch. We encourage you to try it out --
-for more information, see <>.
-
-The default scripting language is http://groovy-lang.org/[groovy].
+The default scripting language is <>.

Additional `lang` plugins enable you to run scripts written in other languages.
Everywhere a script can be used, you can include a `lang` parameter
to specify the language of the script.
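As a sketch of that `lang` parameter (editor's illustration, not taken from this commit; the index and field names are placeholders), a script object carries the language wherever scripts are accepted, for example in `script_fields`:

[source,js]
--------------------------------------------------
GET /my_index/_search
{
  "script_fields": {
    "doubled": {
      "script": {
        "lang": "painless",
        "inline": "doc['my_field'].value * 2"
      }
    }
  }
}
--------------------------------------------------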
diff --git a/docs/reference/modules/scripting/groovy.asciidoc b/docs/reference/modules/scripting/groovy.asciidoc index 07551474e2c..aaacd85f243 100644 --- a/docs/reference/modules/scripting/groovy.asciidoc +++ b/docs/reference/modules/scripting/groovy.asciidoc @@ -1,6 +1,8 @@ [[modules-scripting-groovy]] === Groovy Scripting Language +deprecated[5.0.0,Groovy will be replaced by the new scripting language <>] + Groovy is the default scripting language available in Elasticsearch. Although limited by the <>, it is not a sandboxed language and only `file` scripts may be used by default. diff --git a/docs/reference/modules/scripting/using.asciidoc b/docs/reference/modules/scripting/using.asciidoc index 0c29f82b4d4..17ab4a8180a 100644 --- a/docs/reference/modules/scripting/using.asciidoc +++ b/docs/reference/modules/scripting/using.asciidoc @@ -12,7 +12,7 @@ the same pattern: "params": { ... } <3> } ------------------------------------- -<1> The language the script is written in, which defaults to `groovy`. +<1> The language the script is written in, which defaults to `painless`. <2> The script itself which may be specfied as `inline`, `id`, or `file`. <3> Any named parameters that should be passed into the script. diff --git a/docs/reference/query-dsl/function-score-query.asciidoc b/docs/reference/query-dsl/function-score-query.asciidoc index 68ba9696aa0..3072372d179 100644 --- a/docs/reference/query-dsl/function-score-query.asciidoc +++ b/docs/reference/query-dsl/function-score-query.asciidoc @@ -359,6 +359,7 @@ image:images/Gaussian.png[] where image:images/sigma.png[] is computed to assure that the score takes the value `decay` at distance `scale` from `origin`+-`offset` +// \sigma^2 = -scale^2/(2 \cdot ln(decay)) image:images/sigma_calc.png[] See <> for graphs demonstrating the curve generated by the `gauss` function. @@ -374,6 +375,7 @@ image:images/Exponential.png[] where again the parameter image:images/lambda.png[] is computed to assure that the score takes the value `decay` at distance `scale` from `origin`+-`offset` +// \lambda = ln(decay)/scale image:images/lambda_calc.png[] See <> for graphs demonstrating the curve generated by the `exp` function. diff --git a/docs/reference/query-dsl/match-phrase-prefix-query.asciidoc b/docs/reference/query-dsl/match-phrase-prefix-query.asciidoc index e7e0f618652..40cfabdc96f 100644 --- a/docs/reference/query-dsl/match-phrase-prefix-query.asciidoc +++ b/docs/reference/query-dsl/match-phrase-prefix-query.asciidoc @@ -59,6 +59,6 @@ for appears. For better solutions for _search-as-you-type_ see the <> and -{guide}/_index_time_search_as_you_type.html[Index-Time Search-as-You-Type]. +{defguide}/_index_time_search_as_you_type.html[Index-Time Search-as-You-Type]. =================================================== diff --git a/docs/reference/search/suggesters/completion-suggest.asciidoc b/docs/reference/search/suggesters/completion-suggest.asciidoc index 587cdf86bd7..60065ce96bb 100644 --- a/docs/reference/search/suggesters/completion-suggest.asciidoc +++ b/docs/reference/search/suggesters/completion-suggest.asciidoc @@ -194,26 +194,50 @@ returns this response: -------------------------------------------------- // TESTRESPONSE -The configured weight for a suggestion is returned as `_score`. -The `text` field uses the `input` of your indexed suggestion. -Suggestions are document oriented, the document source is -returned in `_source`. <> -parameters are supported for filtering the document source. 
+
+IMPORTANT: The `_source` meta-field must be enabled, which is the default
+behavior, for `_source` to be returned with suggestions.
+
+The configured weight for a suggestion is returned as `_score`. The
+`text` field uses the `input` of your indexed suggestion. Suggestions
+return the full document `_source` by default. The size of the `_source`
+can impact performance due to disk fetch and network transport overhead.
+For best performance, filter out unnecessary fields from the `_source`
+using <> to minimize
+`_source` size. The following demonstrates an example completion query
+with source filtering:
+
+[source,js]
+--------------------------------------------------
+POST music/_suggest
+{
+    "_source": "completion.*",
+    "song-suggest" : {
+        "prefix" : "nir",
+        "completion" : {
+            "field" : "suggest"
+        }
+    }
+}
+--------------------------------------------------

The basic completion suggester query supports the following parameters:

`field`:: The name of the field on which to run the query (required).
`size`:: The number of suggestions to return (defaults to `5`).
-`payload`:: The name of the field or field name array to be returned
-  as payload (defaults to no fields).

NOTE: The completion suggester considers all documents in the index.
See <> for an explanation of how to query a subset of
documents instead.

-NOTE: Specifying `payload` fields will incur additional search performance
-hit. The `payload` fields are retrieved eagerly (single pass) for top
-suggestions at the shard level using field data or from doc values.
+NOTE: Completion queries that span more than one shard are executed in two
+phases, where the last phase fetches the relevant documents from the shards.
+Because of this document fetch overhead, completion requests against a single
+shard are more performant, so it is recommended to index completions into a
+single-shard index where possible. If shard size causes high heap usage, it is
+still recommended to break the index into multiple shards instead of
+optimizing for completion performance.

[[fuzzy]]
==== Fuzzy queries
diff --git a/docs/reference/search/suggesters/context-suggest.asciidoc b/docs/reference/search/suggesters/context-suggest.asciidoc
index fec167c7b84..293e8bb573c 100644
--- a/docs/reference/search/suggesters/context-suggest.asciidoc
+++ b/docs/reference/search/suggesters/context-suggest.asciidoc
@@ -71,6 +71,7 @@ PUT place_path_category
  }
}
--------------------------------------------------
+// CONSOLE
// TESTSETUP
<1> Defines a `category` context named 'place_type' where the categories must be
sent with the suggestions.
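For orientation (an editor's sketch, not part of this diff; the `place` index, `suggest` field, and category values are assumed from the surrounding docs), a request against such an index passes the category values under `contexts`:

[source,js]
--------------------------------------------------
POST place/_suggest
{
    "suggest" : {
        "prefix" : "tim",
        "completion" : {
            "field" : "suggest",
            "contexts" : {
                "place_type" : [ "cafe", "restaurants" ]
            }
        }
    }
}
--------------------------------------------------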
@@ -330,6 +331,7 @@ POST place/_suggest?pretty } } -------------------------------------------------- +// CONSOLE // TEST[continued] <1> The context query filters for suggestions that fall under the geo location represented by a geohash of '(43.662, -79.380)' diff --git a/docs/reference/search/suggesters/phrase-suggest.asciidoc b/docs/reference/search/suggesters/phrase-suggest.asciidoc index 755650e824d..dace399d650 100644 --- a/docs/reference/search/suggesters/phrase-suggest.asciidoc +++ b/docs/reference/search/suggesters/phrase-suggest.asciidoc @@ -77,6 +77,7 @@ POST test/test {"title": "nobel prize"} POST _refresh -------------------------------------------------- +// CONSOLE // TESTSETUP Once you have the analyzers and mappings set up you can use the `phrase` diff --git a/docs/reference/search/validate.asciidoc b/docs/reference/search/validate.asciidoc index 8a2730c0148..5fb4ad9b7ce 100644 --- a/docs/reference/search/validate.asciidoc +++ b/docs/reference/search/validate.asciidoc @@ -111,6 +111,7 @@ GET twitter/tweet/_validate/query?q=post_date:foo&explain=true responds with: +[source,js] -------------------------------------------------- { "valid" : false, diff --git a/docs/reference/setup/configuration.asciidoc b/docs/reference/setup/configuration.asciidoc index 88aaee8580d..518fb24a8bd 100644 --- a/docs/reference/setup/configuration.asciidoc +++ b/docs/reference/setup/configuration.asciidoc @@ -15,7 +15,7 @@ able to join a cluster, such as `cluster.name` and `network.host`. Elasticsearch has two configuration files: * `elasticsearch.yml` for configuring Elasticsearch, and -* `logging.yml` for configuring Elasticsearch logging. +* `log4j2.properties` for configuring Elasticsearch logging. These files are located in the config directory, whose location defaults to `$ES_HOME/config/`. The Debian and RPM packages set the config directory @@ -110,24 +110,50 @@ command line with `es.node.name` or in the config file with `node.name`. [[logging]] == Logging configuration -Elasticsearch uses an internal logging abstraction and comes, out of the -box, with http://logging.apache.org/log4j/1.2/[log4j]. It tries to simplify -log4j configuration by using http://www.yaml.org/[YAML] to configure it, -and the logging configuration file is `config/logging.yml`. The -http://en.wikipedia.org/wiki/JSON[JSON] and -http://en.wikipedia.org/wiki/.properties[properties] formats are also -supported. Multiple configuration files can be loaded, in which case they will -get merged, as long as they start with the `logging.` prefix and end with one -of the supported suffixes (either `.yml`, `.yaml`, `.json` or `.properties`). -The logger section contains the java packages and their corresponding log -level, where it is possible to omit the `org.elasticsearch` prefix. The -appender section contains the destinations for the logs. Extensive information -on how to customize logging and all the supported appenders can be found on -the http://logging.apache.org/log4j/1.2/manual.html[log4j documentation]. +Elasticsearch uses http://logging.apache.org/log4j/2.x/[Log4j 2] for +logging. Log4j 2 can be configured using the log4j2.properties +file. Elasticsearch exposes a single property `${sys:es.logs}` that can be +referenced in the configuration file to determine the location of the log files; +this will resolve to a prefix for the Elasticsearch log file at runtime. -Additional Appenders and other logging classes provided by -http://logging.apache.org/log4j/extras/[log4j-extras] are also available, -out of the box. 
+For example, if your log directory (`path.logs`) is `/var/log/elasticsearch` and
+your cluster is named `production` then `${sys:es.logs}` will resolve to
+`/var/log/elasticsearch/production`.
+
+[source,properties]
+--------------------------------------------------
+appender.rolling.type = RollingFile <1>
+appender.rolling.name = rolling
+appender.rolling.fileName = ${sys:es.logs}.log <2>
+appender.rolling.layout.type = PatternLayout
+appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %.10000m%n
+appender.rolling.filePattern = ${sys:es.logs}-%d{yyyy-MM-dd}.log <3>
+appender.rolling.policies.type = Policies
+appender.rolling.policies.time.type = TimeBasedTriggeringPolicy <4>
+appender.rolling.policies.time.interval = 1 <5>
+appender.rolling.policies.time.modulate = true <6>
+--------------------------------------------------
+
+<1> Configure the `RollingFile` appender
+<2> Log to `/var/log/elasticsearch/production.log`
+<3> Roll logs to `/var/log/elasticsearch/production-yyyy-MM-dd.log`
+<4> Using a time-based roll policy
+<5> Roll logs on a daily basis
+<6> Align rolls on the day boundary (as opposed to rolling every twenty-four
+    hours)
+
+If you append `.gz` or `.zip` to `appender.rolling.filePattern`, then the logs
+will be compressed as they are rolled.
+
+Multiple configuration files can be loaded (in which case they will get merged)
+as long as they are named `log4j2.properties` and have the Elasticsearch config
+directory as an ancestor; this is useful for plugins that expose additional
+loggers. The logger section contains the java packages and their corresponding
+log level, where it is possible to omit the `org.elasticsearch` prefix. The
+appender section contains the destinations for the logs. Extensive information
+on how to customize logging and all the supported appenders can be found on the
+http://logging.apache.org/log4j/2.x/manual/configuration.html[Log4j
+documentation].

[float]
[[deprecation-logging]]
@@ -139,14 +165,18 @@ you need to migrate certain functionality in the future.

By default, deprecation logging is enabled at the WARN level, the level at
which all deprecation log messages will be emitted.

-[source,yaml]
+[source,properties]
--------------------------------------------------
-deprecation: WARN, deprecation_log_file
+logger.deprecation.level = warn
--------------------------------------------------

This will create a daily rolling deprecation log file in your log directory.
Check this file regularly, especially when you intend to upgrade to a new
major version.

-You can disable it in the `config/logging.yml` file by setting the deprecation
-log level to `INFO`.
+The default logging configuration sets the roll policy for the deprecation
+logs to roll and compress after 1 GB, and to preserve a maximum of five log
+files (four rolled logs, and the active log).
+
+You can disable deprecation logging in the `config/log4j2.properties` file by
+setting the deprecation log level to `info`.
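Tying those paragraphs together (editor's note, not part of the patch), disabling deprecation logging is therefore a one-line change in `config/log4j2.properties`, reusing the property shown above:

[source,properties]
--------------------------------------------------
logger.deprecation.level = info
--------------------------------------------------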
diff --git a/docs/reference/setup/install/check-running.asciidoc b/docs/reference/setup/install/check-running.asciidoc index db4783c3dfa..2e255ec35e5 100644 --- a/docs/reference/setup/install/check-running.asciidoc +++ b/docs/reference/setup/install/check-running.asciidoc @@ -3,26 +3,32 @@ You can test that your Elasticsearch node is running by sending an HTTP request to port `9200` on `localhost`: -[source,sh] +[source,js] -------------------------------------------- -curl localhost:9200 +GET / -------------------------------------------- +// CONSOLE which should give you a response something like this: -[source,js] +["source","js",subs="attributes,callouts"] -------------------------------------------- { - "name" : "Harry Leland", + "name" : "Cp8oag6", "cluster_name" : "elasticsearch", "version" : { - "number" : "5.0.0-alpha1", + "number" : "{version}", "build_hash" : "f27399d", "build_date" : "2016-03-30T09:51:41.449Z", "build_snapshot" : false, - "lucene_version" : "6.0.0" + "lucene_version" : "{lucene_version}" }, "tagline" : "You Know, for Search" } -------------------------------------------- - +// TESTRESPONSE[s/"name" : "Cp8oag6",/"name" : "$body.name",/] +// TESTRESPONSE[s/"cluster_name" : "elasticsearch",/"cluster_name" : "$body.cluster_name",/] +// TESTRESPONSE[s/"build_hash" : "f27399d",/"build_hash" : "$body.version.build_hash",/] +// TESTRESPONSE[s/"build_date" : "2016-03-30T09:51:41.449Z",/"build_date" : $body.version.build_date,/] +// TESTRESPONSE[s/"build_snapshot" : false,/"build_snapshot" : $body.version.build_snapshot,/] +// So much s/// but at least we test that the layout is close to matching.... diff --git a/docs/reference/setup/install/sysconfig-file.asciidoc b/docs/reference/setup/install/sysconfig-file.asciidoc index da3f0910964..8cf3482bf33 100644 --- a/docs/reference/setup/install/sysconfig-file.asciidoc +++ b/docs/reference/setup/install/sysconfig-file.asciidoc @@ -40,7 +40,7 @@ `CONF_DIR`:: Configuration file directory (which needs to include `elasticsearch.yml` - and `logging.yml` files), defaults to `/etc/elasticsearch`. + and `log4j2.properties` files), defaults to `/etc/elasticsearch`. `ES_JAVA_OPTS`:: diff --git a/docs/reference/setup/install/windows.asciidoc b/docs/reference/setup/install/windows.asciidoc index e66c176470f..ef0c5f2a71f 100644 --- a/docs/reference/setup/install/windows.asciidoc +++ b/docs/reference/setup/install/windows.asciidoc @@ -156,7 +156,7 @@ The Elasticsearch service can be configured prior to installation by setting the `CONF_DIR`:: Configuration file directory (which needs to include `elasticsearch.yml` - and `logging.yml` files), defaults to `%ES_HOME%\conf`. + and `log4j2.properties` files), defaults to `%ES_HOME%\conf`. `ES_JAVA_OPTS`:: diff --git a/docs/reference/setup/sysconfig/heap_size.asciidoc b/docs/reference/setup/sysconfig/heap_size.asciidoc index 6bb32097a0a..00c4553b97f 100644 --- a/docs/reference/setup/sysconfig/heap_size.asciidoc +++ b/docs/reference/setup/sysconfig/heap_size.asciidoc @@ -63,7 +63,6 @@ in the jvm.options file and setting these values via `ES_JAVA_OPTS`: ES_JAVA_OPTS="-Xms2g -Xmx2g" ./bin/elasticsearch <1> ES_JAVA_OPTS="-Xms4000m -Xmx4000m" ./bin/elasticsearch <2> ------------------ -// NOTCONSOLE <1> Set the minimum and maximum heap size to 2 GB. <2> Set the minimum and maximum heap size to 4000 MB. 
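For comparison (an editor's sketch based on the surrounding text, not part of this diff), the same 2 GB heap limits expressed as entries in the `jvm.options` file rather than via `ES_JAVA_OPTS`:

------------------
-Xms2g
-Xmx2g
------------------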
diff --git a/docs/reference/setup/sysconfig/swap.asciidoc b/docs/reference/setup/sysconfig/swap.asciidoc
index b3a6e0aa806..ce95e52fbeb 100644
--- a/docs/reference/setup/sysconfig/swap.asciidoc
+++ b/docs/reference/setup/sysconfig/swap.asciidoc
@@ -70,7 +70,6 @@ specifying a new temp directory, by starting Elasticsearch with:
--------------
./bin/elasticsearch -Djava.io.tmpdir=/path/to/temp/dir
--------------
-// NOTCONSOLE

or using the `ES_JAVA_OPTS` environment variable:

@@ -79,7 +78,6 @@ or using the `ES_JAVA_OPTS` environment variable:
export ES_JAVA_OPTS="$ES_JAVA_OPTS -Djava.io.tmpdir=/path/to/temp/dir"
./bin/elasticsearch
--------------
-// NOTCONSOLE

[[disable-swap-files]]
==== Disable all swap files
diff --git a/docs/reference/testing/testing-framework.asciidoc b/docs/reference/testing/testing-framework.asciidoc
index fe7daf8cca4..0bf99b2fafa 100644
--- a/docs/reference/testing/testing-framework.asciidoc
+++ b/docs/reference/testing/testing-framework.asciidoc
@@ -20,7 +20,7 @@ All of the tests are run using a custom junit runner, the `RandomizedRunner` pro

First, you need to include the testing dependency in your project, along with the elasticsearch dependency you have already added. If you use maven and its `pom.xml` file, it looks like this

-[[source,xml]]
+[source,xml]
--------------------------------------------------
@@ -258,5 +258,3 @@
assertHitCount(searchResponse, 4);
assertFirstHit(searchResponse, hasId("4"));
assertSearchHits(searchResponse, "1", "2", "3", "4");
----------------------------
-
-
diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DotExpanderProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DotExpanderProcessor.java
new file mode 100644
index 00000000000..bfc32311733
--- /dev/null
+++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DotExpanderProcessor.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.ingest.common;
+
+import org.elasticsearch.ingest.AbstractProcessor;
+import org.elasticsearch.ingest.ConfigurationUtils;
+import org.elasticsearch.ingest.IngestDocument;
+import org.elasticsearch.ingest.Processor;
+
+import java.util.Map;
+
+public final class DotExpanderProcessor extends AbstractProcessor {
+
+    static final String TYPE = "dot_expander";
+
+    private final String path;
+    private final String field;
+
+    DotExpanderProcessor(String tag, String path, String field) {
+        super(tag);
+        this.path = path;
+        this.field = field;
+    }
+
+    @Override
+    @SuppressWarnings("unchecked")
+    public void execute(IngestDocument ingestDocument) throws Exception {
+        String path;
+        Map<String, Object> map;
+        if (this.path != null) {
+            path = this.path + "." + field;
+            map = ingestDocument.getFieldValue(this.path, Map.class);
+        } else {
+            path = field;
+            map = ingestDocument.getSourceAndMetadata();
+        }
+
+        if (ingestDocument.hasField(path)) {
+            Object value = map.remove(field);
+            ingestDocument.appendFieldValue(path, value);
+        } else {
+            // check whether we actually can expand the field in question into an object field.
+            // part of the path may already exist and if part of it would be a value field (string, integer etc.)
+            // then we can't override it with an object field and we should fail with a good reason.
+            // IngestDocument#setFieldValue(...) would fail too, but the error isn't very understandable
+            for (int index = path.indexOf('.'); index != -1; index = path.indexOf('.', index + 1)) {
+                String partialPath = path.substring(0, index);
+                if (ingestDocument.hasField(partialPath)) {
+                    Object val = ingestDocument.getFieldValue(partialPath, Object.class);
+                    if ((val instanceof Map) == false) {
+                        throw new IllegalArgumentException("cannot expand [" + path + "], because [" + partialPath +
+                                "] is not an object field, but a value field");
+                    }
+                } else {
+                    break;
+                }
+            }
+            Object value = map.remove(field);
+            ingestDocument.setFieldValue(path, value);
+        }
+    }
+
+    @Override
+    public String getType() {
+        return TYPE;
+    }
+
+    String getPath() {
+        return path;
+    }
+
+    String getField() {
+        return field;
+    }
+
+    public static final class Factory implements Processor.Factory {
+
+        @Override
+        public Processor create(Map<String, Processor.Factory> processorFactories, String tag,
+                                Map<String, Object> config) throws Exception {
+            String field = ConfigurationUtils.readStringProperty(TYPE, tag, config, "field");
+            if (field.contains(".") == false) {
+                throw ConfigurationUtils.newConfigurationException(ConfigurationUtils.TAG_KEY, tag, "field",
+                        "field does not contain a dot");
+            }
+            if (field.indexOf('.') == 0 || field.lastIndexOf('.') == field.length() - 1) {
+                throw ConfigurationUtils.newConfigurationException(ConfigurationUtils.TAG_KEY, tag, "field",
+                        "Field can't start or end with a dot");
+            }
+            int firstIndex = -1;
+            for (int index = field.indexOf('.'); index != -1; index = field.indexOf('.', index + 1)) {
+                if (index - firstIndex == 1) {
+                    throw ConfigurationUtils.newConfigurationException(ConfigurationUtils.TAG_KEY, tag, "field",
+                            "No space between dots");
+                }
+                firstIndex = index;
+            }
+
+            String path = ConfigurationUtils.readOptionalStringProperty(TYPE, tag, config, "path");
+            return new DotExpanderProcessor(tag, path, field);
+        }
+    }
+}
diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java
index c89f6164de7..e6948771d8d 100644
--- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java
+++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java
@@ -61,6 +61,7 @@ public class IngestCommonPlugin extends Plugin implements IngestPlugin {
        processors.put(SortProcessor.TYPE, new SortProcessor.Factory());
        processors.put(GrokProcessor.TYPE, new GrokProcessor.Factory(builtinPatterns));
        processors.put(ScriptProcessor.TYPE, new ScriptProcessor.Factory(parameters.scriptService));
+        processors.put(DotExpanderProcessor.TYPE, new DotExpanderProcessor.Factory());
        return Collections.unmodifiableMap(processors);
    }
diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DotExpanderProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DotExpanderProcessorFactoryTests.java
new file mode 100644
index 00000000000..be0695924ef
--- /dev/null
+++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DotExpanderProcessorFactoryTests.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.ingest.common;
+
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+public class DotExpanderProcessorFactoryTests extends ESTestCase {
+
+    public void testCreate() throws Exception {
+        DotExpanderProcessor.Factory factory = new DotExpanderProcessor.Factory();
+
+        Map<String, Object> config = new HashMap<>();
+        config.put("field", "_field.field");
+        config.put("path", "_path");
+        DotExpanderProcessor processor = (DotExpanderProcessor) factory.create(null, "_tag", config);
+        assertThat(processor.getField(), equalTo("_field.field"));
+        assertThat(processor.getPath(), equalTo("_path"));
+
+        config = new HashMap<>();
+        config.put("field", "_field.field");
+        processor = (DotExpanderProcessor) factory.create(null, "_tag", config);
+        assertThat(processor.getField(), equalTo("_field.field"));
+        assertThat(processor.getPath(), nullValue());
+    }
+
+    public void testValidFields() throws Exception {
+        DotExpanderProcessor.Factory factory = new DotExpanderProcessor.Factory();
+
+        String[] fields = new String[] {"a.b", "a.b.c", "a.b.c.d", "ab.cd"};
+        for (String field : fields) {
+            Map<String, Object> config = new HashMap<>();
+            config.put("field", field);
+            config.put("path", "_path");
+            DotExpanderProcessor processor = (DotExpanderProcessor) factory.create(null, "_tag", config);
+            assertThat(processor.getField(), equalTo(field));
+            assertThat(processor.getPath(), equalTo("_path"));
+        }
+    }
+
+    public void testCreate_fieldMissing() throws Exception {
+        DotExpanderProcessor.Factory factory = new DotExpanderProcessor.Factory();
+
+        Map<String, Object> config = new HashMap<>();
+        config.put("path", "_path");
+        Exception e = expectThrows(ElasticsearchParseException.class, () -> factory.create(null, "_tag", config));
+        assertThat(e.getMessage(), equalTo("[field] required property is missing"));
+    }
+
+    public void testCreate_invalidFields() throws Exception {
+        DotExpanderProcessor.Factory factory = new DotExpanderProcessor.Factory();
+        String[] fields = new String[] {"a", "abc"};
+        for (String field : fields) {
+            Map<String, Object> config = new HashMap<>();
+            config.put("field", field);
+            Exception e = expectThrows(ElasticsearchParseException.class, () -> factory.create(null, "_tag", config));
+            assertThat(e.getMessage(), equalTo("[field] field does not contain a dot"));
+        }
a dot")); + } + + fields = new String[] {".a", "a.", "."}; + for (String field : fields) { + Map config = new HashMap<>(); + config.put("field", field); + Exception e = expectThrows(ElasticsearchParseException.class, () -> factory.create(null, "_tag", config)); + assertThat(e.getMessage(), equalTo("[field] Field can't start or end with a dot")); + } + + fields = new String[] {"a..b", "a...b", "a.b..c", "abc.def..hij"}; + for (String field : fields) { + Map config = new HashMap<>(); + config.put("field", field); + Exception e = expectThrows(ElasticsearchParseException.class, () -> factory.create(null, "_tag", config)); + assertThat(e.getMessage(), equalTo("[field] No space between dots")); + } + } + +} diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DotExpanderProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DotExpanderProcessorTests.java new file mode 100644 index 00000000000..1802090e0e5 --- /dev/null +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DotExpanderProcessorTests.java @@ -0,0 +1,144 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.ingest.common; + +import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.Processor; +import org.elasticsearch.test.ESTestCase; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +public class DotExpanderProcessorTests extends ESTestCase { + + public void testEscapeFields() throws Exception { + Map<String, Object> source = new HashMap<>(); + source.put("foo.bar", "baz1"); + IngestDocument document = new IngestDocument(source, Collections.emptyMap()); + DotExpanderProcessor processor = new DotExpanderProcessor("_tag", null, "foo.bar"); + processor.execute(document); + assertThat(document.getFieldValue("foo", Map.class).size(), equalTo(1)); + assertThat(document.getFieldValue("foo.bar", String.class), equalTo("baz1")); + + source = new HashMap<>(); + source.put("foo.bar.baz", "value"); + document = new IngestDocument(source, Collections.emptyMap()); + processor = new DotExpanderProcessor("_tag", null, "foo.bar.baz"); + processor.execute(document); + assertThat(document.getFieldValue("foo", Map.class).size(), equalTo(1)); + assertThat(document.getFieldValue("foo.bar", Map.class).size(), equalTo(1)); + assertThat(document.getFieldValue("foo.bar.baz", String.class), equalTo("value")); + + source = new HashMap<>(); + source.put("foo.bar", "baz1"); + source.put("foo", new HashMap<>(Collections.singletonMap("bar", "baz2"))); + document = new IngestDocument(source, Collections.emptyMap()); + processor = new DotExpanderProcessor("_tag", null, "foo.bar"); + processor.execute(document); + assertThat(document.getSourceAndMetadata().size(), equalTo(1)); + assertThat(document.getFieldValue("foo.bar", List.class).size(), equalTo(2)); + assertThat(document.getFieldValue("foo.bar.0", String.class), equalTo("baz2")); + assertThat(document.getFieldValue("foo.bar.1", String.class), equalTo("baz1")); + + source = new HashMap<>(); + source.put("foo.bar", "2"); + source.put("foo", new HashMap<>(Collections.singletonMap("bar", 1))); + document = new IngestDocument(source, Collections.emptyMap()); + processor = new DotExpanderProcessor("_tag", null, "foo.bar"); + processor.execute(document); + assertThat(document.getSourceAndMetadata().size(), equalTo(1)); + assertThat(document.getFieldValue("foo.bar", List.class).size(), equalTo(2)); + assertThat(document.getFieldValue("foo.bar.0", Integer.class), equalTo(1)); + assertThat(document.getFieldValue("foo.bar.1", String.class), equalTo("2")); + } + + public void testEscapeFields_valueField() throws Exception { + Map<String, Object> source = new HashMap<>(); + source.put("foo.bar", "baz1"); + source.put("foo", "baz2"); + IngestDocument document1 = new IngestDocument(source, Collections.emptyMap()); + Processor processor1 = new DotExpanderProcessor("_tag", null, "foo.bar"); + // foo already exists and is a leaf field and therefore can't be replaced by a map field: + Exception e = expectThrows(IllegalArgumentException.class, () -> processor1.execute(document1)); + assertThat(e.getMessage(), equalTo("cannot expand [foo.bar], because [foo] is not an object field, but a value field")); + + // because foo is not an object field but a value field, the `foo.bar` field can't be expanded + // into [foo].[bar], so foo needs to be renamed to `foo.bar` first: + IngestDocument document = new IngestDocument(source, Collections.emptyMap()); + Processor processor = new RenameProcessor("_tag", "foo", "foo.bar"); + processor.execute(document);
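// After the rename, `foo` should be an object holding {"bar": "baz2"} while the
// literal `foo.bar` key still holds "baz1" (shape inferred from the assertions
// below); expanding `foo.bar` then merges both values into a list:
+ processor = 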
new DotExpanderProcessor("_tag", null, "foo.bar"); + processor.execute(document); + assertThat(document.getFieldValue("foo", Map.class).size(), equalTo(1)); + assertThat(document.getFieldValue("foo.bar.0", String.class), equalTo("baz2")); + assertThat(document.getFieldValue("foo.bar.1", String.class), equalTo("baz1")); + + source = new HashMap<>(); + source.put("foo.bar", "baz1"); + document = new IngestDocument(source, Collections.emptyMap()); + processor = new DotExpanderProcessor("_tag", null, "foo.bar"); + processor.execute(document); + assertThat(document.getFieldValue("foo", Map.class).size(), equalTo(1)); + assertThat(document.getFieldValue("foo.bar", String.class), equalTo("baz1")); + + source = new HashMap<>(); + source.put("foo.bar.baz", "baz1"); + source.put("foo", new HashMap<>(Collections.singletonMap("bar", new HashMap<>()))); + document = new IngestDocument(source, Collections.emptyMap()); + processor = new DotExpanderProcessor("_tag", null, "foo.bar.baz"); + processor.execute(document); + assertThat(document.getFieldValue("foo", Map.class).size(), equalTo(1)); + assertThat(document.getFieldValue("foo.bar", Map.class).size(), equalTo(1)); + assertThat(document.getFieldValue("foo.bar.baz", String.class), equalTo("baz1")); + + source = new HashMap<>(); + source.put("foo.bar.baz", "baz1"); + source.put("foo", new HashMap<>(Collections.singletonMap("bar", "baz2"))); + IngestDocument document2 = new IngestDocument(source, Collections.emptyMap()); + Processor processor2 = new DotExpanderProcessor("_tag", null, "foo.bar.baz"); + e = expectThrows(IllegalArgumentException.class, () -> processor2.execute(document2)); + assertThat(e.getMessage(), equalTo("cannot expand [foo.bar.baz], because [foo.bar] is not an object field, but a value field")); + } + + public void testEscapeFields_path() throws Exception { + Map<String, Object> source = new HashMap<>(); + source.put("foo", new HashMap<>(Collections.singletonMap("bar.baz", "value"))); + IngestDocument document = new IngestDocument(source, Collections.emptyMap()); + DotExpanderProcessor processor = new DotExpanderProcessor("_tag", "foo", "bar.baz"); + processor.execute(document); + assertThat(document.getFieldValue("foo", Map.class).size(), equalTo(1)); + assertThat(document.getFieldValue("foo.bar", Map.class).size(), equalTo(1)); + assertThat(document.getFieldValue("foo.bar.baz", String.class), equalTo("value")); + + source = new HashMap<>(); + source.put("field", new HashMap<>(Collections.singletonMap("foo.bar.baz", "value"))); + document = new IngestDocument(source, Collections.emptyMap()); + processor = new DotExpanderProcessor("_tag", "field", "foo.bar.baz"); + processor.execute(document); + assertThat(document.getFieldValue("field.foo", Map.class).size(), equalTo(1)); + assertThat(document.getFieldValue("field.foo.bar", Map.class).size(), equalTo(1)); + assertThat(document.getFieldValue("field.foo.bar.baz", String.class), equalTo("value")); + } + +} diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/10_basic.yaml b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/10_basic.yaml index 14f58369dfa..e37b2d83183 100644 --- a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/10_basic.yaml +++ b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/10_basic.yaml @@ -13,17 +13,18 @@ - match: { nodes.$master.ingest.processors.1.type: convert } - match: { nodes.$master.ingest.processors.2.type: date } - match: { nodes.$master.ingest.processors.3.type: date_index_name }
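# The new dot_expander processor slots in alphabetically between date_index_name
# and fail, so every subsequent processor's index shifts up by one:
- - match: 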
{ nodes.$master.ingest.processors.4.type: fail } - - match: { nodes.$master.ingest.processors.5.type: foreach } - - match: { nodes.$master.ingest.processors.6.type: grok } - - match: { nodes.$master.ingest.processors.7.type: gsub } - - match: { nodes.$master.ingest.processors.8.type: join } - - match: { nodes.$master.ingest.processors.9.type: lowercase } - - match: { nodes.$master.ingest.processors.10.type: remove } - - match: { nodes.$master.ingest.processors.11.type: rename } - - match: { nodes.$master.ingest.processors.12.type: script } - - match: { nodes.$master.ingest.processors.13.type: set } - - match: { nodes.$master.ingest.processors.14.type: sort } - - match: { nodes.$master.ingest.processors.15.type: split } - - match: { nodes.$master.ingest.processors.16.type: trim } - - match: { nodes.$master.ingest.processors.17.type: uppercase } + - match: { nodes.$master.ingest.processors.4.type: dot_expander } + - match: { nodes.$master.ingest.processors.5.type: fail } + - match: { nodes.$master.ingest.processors.6.type: foreach } + - match: { nodes.$master.ingest.processors.7.type: grok } + - match: { nodes.$master.ingest.processors.8.type: gsub } + - match: { nodes.$master.ingest.processors.9.type: join } + - match: { nodes.$master.ingest.processors.10.type: lowercase } + - match: { nodes.$master.ingest.processors.11.type: remove } + - match: { nodes.$master.ingest.processors.12.type: rename } + - match: { nodes.$master.ingest.processors.13.type: script } + - match: { nodes.$master.ingest.processors.14.type: set } + - match: { nodes.$master.ingest.processors.15.type: sort } + - match: { nodes.$master.ingest.processors.16.type: split } + - match: { nodes.$master.ingest.processors.17.type: trim } + - match: { nodes.$master.ingest.processors.18.type: uppercase } diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/130_escape_dot.yaml b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/130_escape_dot.yaml new file mode 100644 index 00000000000..1d537ffa6b7 --- /dev/null +++ b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/130_escape_dot.yaml @@ -0,0 +1,40 @@ +--- +teardown: + - do: + ingest.delete_pipeline: + id: "1" + ignore: 404 + +--- +"Test escape_dot processor": + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "dot_expander" : { + "field" : "foo.bar" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + type: test + id: 1 + pipeline: "1" + body: { + foo.bar: "baz" + } + + - do: + get: + index: test + type: test + id: 1 + - match: { _source.foo.bar: "baz" } diff --git a/modules/lang-groovy/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java b/modules/lang-groovy/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java index 6216ec2354e..7f390ade088 100644 --- a/modules/lang-groovy/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java +++ b/modules/lang-groovy/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java @@ -23,10 +23,14 @@ import groovy.lang.Binding; import groovy.lang.GroovyClassLoader; import groovy.lang.GroovyCodeSource; import groovy.lang.Script; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Scorer; import org.codehaus.groovy.ast.ClassCodeExpressionTransformer; import 
org.codehaus.groovy.ast.ClassNode; +import org.codehaus.groovy.ast.Parameter; import org.codehaus.groovy.ast.expr.ConstantExpression; import org.codehaus.groovy.ast.expr.Expression; import org.codehaus.groovy.classgen.GeneratorContext; @@ -43,7 +47,6 @@ import org.elasticsearch.bootstrap.BootstrapInfo; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.hash.MessageDigests; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.script.ClassPermission; import org.elasticsearch.script.CompiledScript; @@ -93,6 +96,9 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri public GroovyScriptEngineService(Settings settings) { super(settings); + + deprecationLogger.deprecated("[groovy] scripts are deprecated, use [painless] scripts instead"); + // Creates the classloader here in order to isolate Groovy-land code final SecurityManager sm = System.getSecurityManager(); if (sm != null) { @@ -179,6 +185,8 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri @Override public ExecutableScript executable(CompiledScript compiledScript, Map<String, Object> vars) { + deprecationLogger.deprecated("[groovy] scripts are deprecated, use [painless] scripts instead"); + try { Map<String, Object> allVars = new HashMap<>(); if (vars != null) { @@ -192,6 +200,8 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri @Override public SearchScript search(final CompiledScript compiledScript, final SearchLookup lookup, @Nullable final Map<String, Object> vars) { + deprecationLogger.deprecated("[groovy] scripts are deprecated, use [painless] scripts instead"); + return new SearchScript() { @Override @@ -248,14 +258,14 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri private final Script script; private final LeafSearchLookup lookup; private final Map<String, Object> variables; - private final ESLogger logger; + private final Logger logger; - public GroovyScript(CompiledScript compiledScript, Script script, ESLogger logger) { + public GroovyScript(CompiledScript compiledScript, Script script, Logger logger) { this(compiledScript, script, null, logger); } @SuppressWarnings("unchecked") - public GroovyScript(CompiledScript compiledScript, Script script, @Nullable LeafSearchLookup lookup, ESLogger logger) { + public GroovyScript(CompiledScript compiledScript, Script script, @Nullable LeafSearchLookup lookup, Logger logger) { this.compiledScript = compiledScript; this.script = script; this.lookup = lookup; @@ -299,7 +309,7 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri // resulting in the uncaughtExceptionHandler handling it. 
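// Note on the logging change below: with the move from ESLogger to log4j2's Logger,
// the Throwable becomes the last argument and parameterized messages are built lazily,
// so the old call
//   logger.trace("failed to run {}", ae, compiledScript);
// becomes
//   logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to run {}", compiledScript), ae);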
final StackTraceElement[] elements = ae.getStackTrace(); if (elements.length > 0 && "org.codehaus.groovy.runtime.InvokerHelper".equals(elements[0].getClassName())) { - logger.trace("failed to run {}", ae, compiledScript); + logger.trace((Supplier) () -> new ParameterizedMessage("failed to run {}", compiledScript), ae); throw new ScriptException("Error evaluating " + compiledScript.name(), ae, emptyList(), "", compiledScript.lang()); } diff --git a/modules/lang-mustache/build.gradle b/modules/lang-mustache/build.gradle index 011f949c860..8fed78aca32 100644 --- a/modules/lang-mustache/build.gradle +++ b/modules/lang-mustache/build.gradle @@ -24,7 +24,7 @@ esplugin { } dependencies { - compile "com.github.spullara.mustache.java:compiler:0.9.1" + compile "com.github.spullara.mustache.java:compiler:0.9.3" } integTest { diff --git a/modules/lang-mustache/licenses/compiler-0.9.1.jar.sha1 b/modules/lang-mustache/licenses/compiler-0.9.1.jar.sha1 deleted file mode 100644 index 96152e075b3..00000000000 --- a/modules/lang-mustache/licenses/compiler-0.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -14aec5344639782ee76441401b773946c65eb2b3 diff --git a/modules/lang-mustache/licenses/compiler-0.9.3.jar.sha1 b/modules/lang-mustache/licenses/compiler-0.9.3.jar.sha1 new file mode 100644 index 00000000000..2b0fbbc542e --- /dev/null +++ b/modules/lang-mustache/licenses/compiler-0.9.3.jar.sha1 @@ -0,0 +1 @@ +2815e016c63bec4f18704ea4f5489106a5b01a99 \ No newline at end of file diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java index 66ecf23fa02..b7d7087373c 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java @@ -20,6 +20,8 @@ package org.elasticsearch.script.mustache; import com.github.mustachejava.Mustache; import com.github.mustachejava.MustacheFactory; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.SpecialPermission; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractComponent; @@ -165,7 +167,7 @@ public final class MustacheScriptEngineService extends AbstractComponent impleme return null; }); } catch (Exception e) { - logger.error("Error running {}", e, template); + logger.error((Supplier) () -> new ParameterizedMessage("Error running {}", template), e); throw new GeneralScriptException("Error running " + template, e); } return result.bytes(); diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java index 3d5965b3586..09e8afbca0b 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java @@ -515,7 +515,8 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder> nodePlugins() { - return Collections.singleton(PercolatorPlugin.class); + return Arrays.asList(PercolatorPlugin.class, FoolMeScriptLang.class); } @Override @@ -81,25 +88,43 @@ public class PercolatorBackwardsCompatibilityTests extends ESIntegTestCase { .setTypes(".percolator") .addSort("_uid", SortOrder.ASC) 
.get(); - assertThat(searchResponse.getHits().getTotalHits(), equalTo(3L)); + assertThat(searchResponse.getHits().getTotalHits(), equalTo(4L)); assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1")); assertThat(searchResponse.getHits().getAt(1).id(), equalTo("2")); assertThat(searchResponse.getHits().getAt(2).id(), equalTo("3")); + assertThat(searchResponse.getHits().getAt(3).id(), equalTo("4")); + assertThat(XContentMapValues.extractValue("query.script.script.inline", + searchResponse.getHits().getAt(3).sourceAsMap()), equalTo("return true")); + // we don't upgrade the script definitions to explicitly include the lang, + // because we read / parse the query at search time. + assertThat(XContentMapValues.extractValue("query.script.script.lang", + searchResponse.getHits().getAt(3).sourceAsMap()), nullValue()); // verify percolate response PercolateResponse percolateResponse = preparePercolate(client()) + .setIndices(INDEX_NAME) + .setDocumentType("message") + .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("{}")) + .get(); + + assertThat(percolateResponse.getCount(), equalTo(1L)); + assertThat(percolateResponse.getMatches().length, equalTo(1)); + assertThat(percolateResponse.getMatches()[0].getId().string(), equalTo("4")); + + percolateResponse = preparePercolate(client()) .setIndices(INDEX_NAME) .setDocumentType("message") .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("message", "the quick brown fox jumps over the lazy dog")) .get(); - assertThat(percolateResponse.getCount(), equalTo(2L)); - assertThat(percolateResponse.getMatches().length, equalTo(2)); + assertThat(percolateResponse.getCount(), equalTo(3L)); + assertThat(percolateResponse.getMatches().length, equalTo(3)); assertThat(percolateResponse.getMatches()[0].getId().string(), equalTo("1")); assertThat(percolateResponse.getMatches()[1].getId().string(), equalTo("2")); + assertThat(percolateResponse.getMatches()[2].getId().string(), equalTo("4")); // add an extra query and verify the results - client().prepareIndex(INDEX_NAME, ".percolator", "4") + client().prepareIndex(INDEX_NAME, ".percolator", "5") .setSource(jsonBuilder().startObject().field("query", matchQuery("message", "fox jumps")).endObject()) .get(); refresh(); @@ -110,8 +135,8 @@ public class PercolatorBackwardsCompatibilityTests extends ESIntegTestCase { .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("message", "the quick brown fox jumps over the lazy dog")) .get(); - assertThat(percolateResponse.getCount(), equalTo(3L)); - assertThat(percolateResponse.getMatches().length, equalTo(3)); + assertThat(percolateResponse.getCount(), equalTo(4L)); + assertThat(percolateResponse.getMatches().length, equalTo(4)); assertThat(percolateResponse.getMatches()[0].getId().string(), equalTo("1")); assertThat(percolateResponse.getMatches()[1].getId().string(), equalTo("2")); assertThat(percolateResponse.getMatches()[2].getId().string(), equalTo("4")); @@ -131,4 +156,19 @@ public class PercolatorBackwardsCompatibilityTests extends ESIntegTestCase { ensureGreen(INDEX_NAME); } + // Fool the script service into thinking this is the groovy script language, + // so that we can implicitly run a script that has no lang defined against the legacy language: + public static class FoolMeScriptLang extends MockScriptPlugin { + + @Override + protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() { + return Collections.singletonMap("return true", (vars) -> true); + } + + @Override + public String pluginScriptLang() { + return "groovy"; + } + } + }
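// For reference, the extra query added to the 2.0 bwc index as document "4" is
// assumed (from the assertions above) to look roughly like:
//   {"query": {"script": {"script": {"inline": "return true"}}}}
// with no "lang" key, which is why FoolMeScriptLang registers "return true" under
// the legacy default language.
diff 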
--git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java index df1e6ea6f8c..621cb07d3cd 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java @@ -36,10 +36,14 @@ import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; @@ -61,6 +65,8 @@ import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder; import org.elasticsearch.index.query.functionscore.RandomScoreFunctionBuilder; import org.elasticsearch.indices.TermsLookup; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.script.MockScriptPlugin; +import org.elasticsearch.script.Script; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.VersionUtils; @@ -72,6 +78,8 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.Map; +import java.util.function.Function; import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; @@ -100,7 +108,7 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase { @Override protected Collection<Class<? extends Plugin>> getPlugins() { - return pluginList(InternalSettingsPlugin.class, PercolatorPlugin.class); + return pluginList(InternalSettingsPlugin.class, PercolatorPlugin.class, FoolMeScriptPlugin.class); } @Before @@ -493,4 +501,71 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase { DocumentMapper defaultMapper = parser2x.parse("type1", new CompressedXContent(mapping)); assertEquals(mapping, defaultMapper.mappingSource().string()); } + + public void testImplicitlySetDefaultScriptLang() throws Exception { + addQueryMapping(); + XContentBuilder query = jsonBuilder(); + query.startObject(); + query.startObject("script"); + if (randomBoolean()) { + query.field("script", "return true"); + } else { + query.startObject("script"); + query.field("inline", "return true"); + query.endObject(); + } + query.endObject(); + query.endObject(); + + ParsedDocument doc = mapperService.documentMapper(typeName).parse("test", typeName, "1", + XContentFactory.jsonBuilder().startObject() + .rawField(fieldName, new BytesArray(query.string())) + .endObject().bytes()); + BytesRef querySource = doc.rootDoc().getFields(fieldType.queryBuilderField.name())[0].binaryValue(); + Map<String, Object> parsedQuery = XContentHelper.convertToMap(new BytesArray(querySource), true).v2();
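// The percolator field mapper should have rewritten the stored query so that the
// script explicitly carries the default lang; roughly (sketch inferred from the
// assertion below):
//   {"script": {"script": {"inline": "return true", "lang": "<default>"}}}
+ assertEquals(Script.DEFAULT_SCRIPT_LANG, 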
XContentMapValues.extractValue("script.script.lang", parsedQuery)); + + query = jsonBuilder(); + query.startObject(); + query.startObject("function_score"); + query.startArray("functions"); + query.startObject(); + query.startObject("script_score"); + if (randomBoolean()) { + query.field("script", "return true"); + } else { + query.startObject("script"); + query.field("inline", "return true"); + query.endObject(); + } + query.endObject(); + query.endObject(); + query.endArray(); + query.endObject(); + query.endObject(); + + doc = mapperService.documentMapper(typeName).parse("test", typeName, "1", + XContentFactory.jsonBuilder().startObject() + .rawField(fieldName, new BytesArray(query.string())) + .endObject().bytes()); + querySource = doc.rootDoc().getFields(fieldType.queryBuilderField.name())[0].binaryValue(); + parsedQuery = XContentHelper.convertToMap(new BytesArray(querySource), true).v2(); + assertEquals(Script.DEFAULT_SCRIPT_LANG, + ((List) XContentMapValues.extractValue("function_score.functions.script_score.script.lang", parsedQuery)).get(0)); + } + + // Just so that we store scripts in percolator queries, but don't really execute them. + public static class FoolMeScriptPlugin extends MockScriptPlugin { + + @Override + protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() { + return Collections.singletonMap("return true", (vars) -> true); + } + + @Override + public String pluginScriptLang() { + return Script.DEFAULT_SCRIPT_LANG; + } + } + } diff --git a/modules/percolator/src/test/resources/indices/percolator/bwc_index_2.0.0.zip b/modules/percolator/src/test/resources/indices/percolator/bwc_index_2.0.0.zip index f0e2d05e4af..43a8cceb193 100644 Binary files a/modules/percolator/src/test/resources/indices/percolator/bwc_index_2.0.0.zip and b/modules/percolator/src/test/resources/indices/percolator/bwc_index_2.0.0.zip differ diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java index 0178d2e1fb6..32824e969d9 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.reindex; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; @@ -31,7 +32,6 @@ import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.bulk.Retry; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.client.ParentTaskAssigningClient; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; @@ -64,7 +64,7 @@ import static org.elasticsearch.search.sort.SortBuilders.fieldSort; * their tests can use them. Most methods run in the listener thread pool because they are meant to be fast and don't expect to block. 
*/ public abstract class AbstractAsyncBulkByScrollAction> { - protected final ESLogger logger; + protected final Logger logger; protected final BulkByScrollTask task; protected final ThreadPool threadPool; /** @@ -81,7 +81,7 @@ public abstract class AbstractAsyncBulkByScrollAction listener) { this.task = task; this.logger = logger; diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java index ed5211da141..f441e527222 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.reindex; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.bulk.BulkRequest; @@ -26,7 +27,6 @@ import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.client.ParentTaskAssigningClient; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.IdFieldMapper; @@ -71,7 +71,7 @@ public abstract class AbstractAsyncBulkIndexByScrollAction, ScrollableHitSource.Hit, RequestWrapper> scriptApplier; - public AbstractAsyncBulkIndexByScrollAction(BulkByScrollTask task, ESLogger logger, ParentTaskAssigningClient client, + public AbstractAsyncBulkIndexByScrollAction(BulkByScrollTask task, Logger logger, ParentTaskAssigningClient client, ThreadPool threadPool, Request mainRequest, ActionListener listener, ScriptService scriptService, ClusterState clusterState) { diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java index a547c5303bc..ade1f8c2f84 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java @@ -19,10 +19,10 @@ package org.elasticsearch.index.reindex; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; @@ -49,7 +49,7 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueNanos; * Task storing information about a currently running BulkByScroll request. */ public class BulkByScrollTask extends CancellableTask { - private static final ESLogger logger = ESLoggerFactory.getLogger(BulkByScrollTask.class.getPackage().getName()); + private static final Logger logger = ESLoggerFactory.getLogger(BulkByScrollTask.class.getPackage().getName()); /** * The total number of documents this request will process. 
0 means we don't yet know or, possibly, there are actually 0 documents diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java index 030753e9414..4d5f7623400 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java @@ -19,6 +19,9 @@ package org.elasticsearch.index.reindex; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.bulk.BackoffPolicy; @@ -31,7 +34,6 @@ import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.client.Client; import org.elasticsearch.client.ParentTaskAssigningClient; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; @@ -60,7 +62,7 @@ public class ClientScrollableHitSource extends ScrollableHitSource { private final ParentTaskAssigningClient client; private final SearchRequest firstSearchRequest; - public ClientScrollableHitSource(ESLogger logger, BackoffPolicy backoffPolicy, ThreadPool threadPool, Runnable countSearchRetry, + public ClientScrollableHitSource(Logger logger, BackoffPolicy backoffPolicy, ThreadPool threadPool, Runnable countSearchRetry, Consumer fail, ParentTaskAssigningClient client, SearchRequest firstSearchRequest) { super(logger, backoffPolicy, threadPool, countSearchRetry, fail); this.client = client; @@ -105,7 +107,7 @@ public class ClientScrollableHitSource extends ScrollableHitSource { @Override public void onFailure(Exception e) { - logger.warn("Failed to clear scroll [{}]", e, scrollId); + logger.warn((Supplier) () -> new ParameterizedMessage("Failed to clear scroll [{}]", scrollId), e); } }); } @@ -144,11 +146,13 @@ public class ClientScrollableHitSource extends ScrollableHitSource { if (retries.hasNext()) { retryCount += 1; TimeValue delay = retries.next(); - logger.trace("retrying rejected search after [{}]", e, delay); + logger.trace((Supplier) () -> new ParameterizedMessage("retrying rejected search after [{}]", delay), e); countSearchRetry.run(); threadPool.schedule(delay, ThreadPool.Names.SAME, this); } else { - logger.warn("giving up on search because we retried [{}] times without success", e, retryCount); + logger.warn( + (Supplier) () -> new ParameterizedMessage( + "giving up on search because we retried [{}] times without success", retryCount), e); fail.accept(e); } } else { @@ -242,7 +246,7 @@ public class ClientScrollableHitSource extends ScrollableHitSource { public Long getTTL() { return fieldValue(TTLFieldMapper.NAME); } - + private T fieldValue(String fieldName) { SearchHitField field = delegate.field(fieldName); return field == null ? 
null : field.value(); diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java index da601fca08c..0b4b66222bc 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.reindex; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.bulk.BackoffPolicy; import org.elasticsearch.action.search.ShardSearchFailure; @@ -28,7 +29,6 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -50,13 +50,13 @@ import static java.util.Objects.requireNonNull; public abstract class ScrollableHitSource implements Closeable { private final AtomicReference scrollId = new AtomicReference<>(); - protected final ESLogger logger; + protected final Logger logger; protected final BackoffPolicy backoffPolicy; protected final ThreadPool threadPool; protected final Runnable countSearchRetry; protected final Consumer fail; - public ScrollableHitSource(ESLogger logger, BackoffPolicy backoffPolicy, ThreadPool threadPool, Runnable countSearchRetry, + public ScrollableHitSource(Logger logger, BackoffPolicy backoffPolicy, ThreadPool threadPool, Runnable countSearchRetry, Consumer fail) { this.logger = logger; this.backoffPolicy = backoffPolicy; diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportDeleteByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportDeleteByQueryAction.java index df07bd34859..99362e75f9b 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportDeleteByQueryAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportDeleteByQueryAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.reindex; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.support.ActionFilters; @@ -29,7 +30,6 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.script.ScriptService; import org.elasticsearch.tasks.Task; @@ -68,7 +68,7 @@ public class TransportDeleteByQueryAction extends HandledTransportAction { - public AsyncDeleteBySearchAction(BulkByScrollTask task, ESLogger logger, ParentTaskAssigningClient client, ThreadPool threadPool, + public AsyncDeleteBySearchAction(BulkByScrollTask task, Logger logger, ParentTaskAssigningClient client, ThreadPool threadPool, DeleteByQueryRequest request, ActionListener listener, ScriptService scriptService, ClusterState clusterState) { super(task, logger, client, threadPool, request, listener, 
scriptService, clusterState); diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java index 57d29283bb3..33aca028351 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java @@ -27,6 +27,7 @@ import org.apache.http.client.CredentialsProvider; import org.apache.http.impl.client.BasicCredentialsProvider; import org.apache.http.impl.nio.reactor.IOReactorConfig; import org.apache.http.message.BasicHeader; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.bulk.BackoffPolicy; @@ -44,7 +45,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -226,7 +226,7 @@ public class TransportReindexAction extends HandledTransportAction createdThreads = emptyList(); - public AsyncIndexBySearchAction(BulkByScrollTask task, ESLogger logger, ParentTaskAssigningClient client, ThreadPool threadPool, + public AsyncIndexBySearchAction(BulkByScrollTask task, Logger logger, ParentTaskAssigningClient client, ThreadPool threadPool, ReindexRequest request, ActionListener listener, ScriptService scriptService, ClusterState clusterState) { super(task, logger, client, threadPool, request, listener, scriptService, clusterState); diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java index 79c013482e7..0f4bf5695d1 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.reindex; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActionFilters; @@ -29,7 +30,6 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.IdFieldMapper; @@ -81,7 +81,7 @@ public class TransportUpdateByQueryAction extends HandledTransportAction { - public AsyncIndexBySearchAction(BulkByScrollTask task, ESLogger logger, ParentTaskAssigningClient client, ThreadPool threadPool, + public AsyncIndexBySearchAction(BulkByScrollTask task, Logger logger, ParentTaskAssigningClient client, ThreadPool threadPool, UpdateByQueryRequest request, ActionListener listener, ScriptService scriptService, ClusterState clusterState) { super(task, logger, client, 
threadPool, request, listener, scriptService, clusterState); diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java index 572913493cf..207948c9215 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java @@ -21,6 +21,9 @@ package org.elasticsearch.index.reindex.remote; import org.apache.http.HttpEntity; import org.apache.http.util.EntityUtils; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.Version; @@ -34,7 +37,6 @@ import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParseFieldMatcherSupplier; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -70,7 +72,7 @@ public class RemoteScrollableHitSource extends ScrollableHitSource { private final SearchRequest searchRequest; Version remoteVersion; - public RemoteScrollableHitSource(ESLogger logger, BackoffPolicy backoffPolicy, ThreadPool threadPool, Runnable countSearchRetry, + public RemoteScrollableHitSource(Logger logger, BackoffPolicy backoffPolicy, ThreadPool threadPool, Runnable countSearchRetry, Consumer fail, RestClient client, BytesReference query, SearchRequest searchRequest) { super(logger, backoffPolicy, threadPool, countSearchRetry, fail); this.query = query; @@ -126,7 +128,7 @@ public class RemoteScrollableHitSource extends ScrollableHitSource { @Override public void onFailure(Exception t) { - logger.warn("Failed to clear scroll [{}]", t, scrollId); + logger.warn((Supplier) () -> new ParameterizedMessage("Failed to clear scroll [{}]", scrollId), t); } }); } @@ -173,7 +175,8 @@ public class RemoteScrollableHitSource extends ScrollableHitSource { if (RestStatus.TOO_MANY_REQUESTS.getStatus() == re.getResponse().getStatusLine().getStatusCode()) { if (retries.hasNext()) { TimeValue delay = retries.next(); - logger.trace("retrying rejected search after [{}]", e, delay); + logger.trace( + (Supplier) () -> new ParameterizedMessage("retrying rejected search after [{}]", delay), e); countSearchRetry.run(); threadPool.schedule(delay, ThreadPool.Names.SAME, RetryHelper.this); return; diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexSourceTargetValidationTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexSourceTargetValidationTests.java index 1213762155b..12ed0ed090f 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexSourceTargetValidationTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexSourceTargetValidationTests.java @@ -31,10 +31,13 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.bytes.BytesArray; 
+import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.reindex.remote.RemoteInfo; import org.elasticsearch.test.ESTestCase; +import java.util.Collections; + import static java.util.Collections.emptyMap; import static org.hamcrest.Matchers.containsString; @@ -54,7 +57,8 @@ public class ReindexSourceTargetValidationTests extends ESTestCase { .put(index("source", "source_multi"), true) .put(index("source2", "source_multi"), true)).build(); private static final IndexNameExpressionResolver INDEX_NAME_EXPRESSION_RESOLVER = new IndexNameExpressionResolver(Settings.EMPTY); - private static final AutoCreateIndex AUTO_CREATE_INDEX = new AutoCreateIndex(Settings.EMPTY, INDEX_NAME_EXPRESSION_RESOLVER); + private static final AutoCreateIndex AUTO_CREATE_INDEX = new AutoCreateIndex(Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), INDEX_NAME_EXPRESSION_RESOLVER); public void testObviousCases() { fails("target", "target"); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java index b0fc9b428ba..1a262a32d3d 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java @@ -206,14 +206,6 @@ public class RoundTripTests extends ESTestCase { emptyMap()); // Params } - private long randomPositiveLong() { - long l; - do { - l = randomLong(); - } while (l < 0); - return l; - } - private void assertResponseEquals(BulkIndexByScrollResponse expected, BulkIndexByScrollResponse actual) { assertEquals(expected.getTook(), actual.getTook()); assertTaskStatusEquals(expected.getStatus(), actual.getStatus()); diff --git a/modules/transport-netty3/build.gradle b/modules/transport-netty3/build.gradle index e13170e5c29..eae0608f92f 100644 --- a/modules/transport-netty3/build.gradle +++ b/modules/transport-netty3/build.gradle @@ -123,5 +123,5 @@ thirdPartyAudit.excludes = [ // from org.jboss.netty.util.internal.logging.InternalLoggerFactory (netty) - it's optional 'org.slf4j.Logger', - 'org.slf4j.LoggerFactory', + 'org.slf4j.LoggerFactory' ] diff --git a/modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/Netty3HttpServerTransport.java b/modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/Netty3HttpServerTransport.java index d12636de5cb..df5c02c2d32 100644 --- a/modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/Netty3HttpServerTransport.java +++ b/modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/Netty3HttpServerTransport.java @@ -21,6 +21,8 @@ package org.elasticsearch.http.netty3; import com.carrotsearch.hppc.IntHashSet; import com.carrotsearch.hppc.IntSet; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; @@ -495,10 +497,18 @@ public class Netty3HttpServerTransport extends AbstractLifecycleComponent implem return; } if (!NetworkExceptionHelper.isCloseConnectionException(e.getCause())) { - logger.warn("Caught exception while handling client http traffic, closing connection {}", e.getCause(), ctx.getChannel()); + logger.warn( + (Supplier) () -> new 
ParameterizedMessage( + "Caught exception while handling client http traffic, closing connection {}", + ctx.getChannel()), + e.getCause()); ctx.getChannel().close(); } else { - logger.debug("Caught exception while handling client http traffic, closing connection {}", e.getCause(), ctx.getChannel()); + logger.debug( + (Supplier) () -> new ParameterizedMessage( + "Caught exception while handling client http traffic, closing connection {}", + ctx.getChannel()), + e.getCause()); ctx.getChannel().close(); } } diff --git a/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3InternalESLogger.java b/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3InternalESLogger.java index 6ff941c48e7..03c9671ad78 100644 --- a/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3InternalESLogger.java +++ b/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3InternalESLogger.java @@ -19,8 +19,8 @@ package org.elasticsearch.transport.netty3; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.SuppressLoggerChecks; -import org.elasticsearch.common.logging.ESLogger; import org.jboss.netty.logging.AbstractInternalLogger; /** @@ -29,9 +29,9 @@ import org.jboss.netty.logging.AbstractInternalLogger; @SuppressLoggerChecks(reason = "safely delegates to logger") final class Netty3InternalESLogger extends AbstractInternalLogger { - private final ESLogger logger; + private final Logger logger; - Netty3InternalESLogger(ESLogger logger) { + Netty3InternalESLogger(Logger logger) { this.logger = logger; } diff --git a/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3OpenChannelsHandler.java b/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3OpenChannelsHandler.java index 6a7732723c4..bbfb775d0e7 100644 --- a/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3OpenChannelsHandler.java +++ b/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3OpenChannelsHandler.java @@ -19,8 +19,8 @@ package org.elasticsearch.transport.netty3; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.jboss.netty.channel.Channel; @@ -42,9 +42,9 @@ public class Netty3OpenChannelsHandler implements ChannelUpstreamHandler, Releas final CounterMetric openChannelsMetric = new CounterMetric(); final CounterMetric totalChannelsMetric = new CounterMetric(); - final ESLogger logger; + final Logger logger; - public Netty3OpenChannelsHandler(ESLogger logger) { + public Netty3OpenChannelsHandler(Logger logger) { this.logger = logger; } diff --git a/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Transport.java b/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Transport.java index 71a9ca25d4d..6f1f7069860 100644 --- a/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Transport.java +++ b/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Transport.java @@ -19,13 +19,14 @@ package org.elasticsearch.transport.netty3; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; 
import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.network.NetworkService.TcpSettings; @@ -46,7 +47,6 @@ import org.elasticsearch.transport.TransportServiceAdapter; import org.elasticsearch.transport.TransportSettings; import org.jboss.netty.bootstrap.ClientBootstrap; import org.jboss.netty.bootstrap.ServerBootstrap; -import org.jboss.netty.buffer.ChannelBuffer; import org.jboss.netty.channel.AdaptiveReceiveBufferSizePredictorFactory; import org.jboss.netty.channel.Channel; import org.jboss.netty.channel.ChannelFuture; @@ -554,7 +554,7 @@ public class Netty3Transport extends TcpTransport { try { serverBootstrap.releaseExternalResources(); } catch (Exception e) { - logger.debug("Error closing serverBootstrap for profile [{}]", e, name); + logger.debug((Supplier) () -> new ParameterizedMessage("Error closing serverBootstrap for profile [{}]", name), e); } } serverBootstraps.clear(); diff --git a/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Utils.java b/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Utils.java index 17a367735d4..2cbf92997b4 100644 --- a/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Utils.java +++ b/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Utils.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.transport.netty3; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; import org.elasticsearch.common.SuppressForbidden; @@ -121,7 +123,9 @@ public class Netty3Utils { } }); } catch (final SecurityException e) { - Loggers.getLogger(Netty3Utils.class).debug("Unable to get/set System Property: {}", e, key); + Loggers + .getLogger(Netty3Utils.class) + .debug((Supplier) () -> new ParameterizedMessage("Unable to get/set System Property: {}", key), e); } } } diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index 5012b3a1634..4fc38bc3947 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -125,8 +125,6 @@ thirdPartyAudit.excludes = [ 'net.jpountz.lz4.LZ4FastDecompressor', 'net.jpountz.xxhash.StreamingXXHash32', 'net.jpountz.xxhash.XXHashFactory', - 'org.apache.logging.log4j.LogManager', - 'org.apache.logging.log4j.Logger', 'org.apache.tomcat.Apr', 'org.apache.tomcat.jni.CertificateRequestedCallback', 'org.apache.tomcat.jni.CertificateRequestedCallback$KeyMaterial', diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java index 1a6e8ae60bf..96795cec3e0 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java @@ -43,6 +43,8 @@ import io.netty.handler.codec.http.HttpObjectAggregator; import 
io.netty.handler.codec.http.HttpRequestDecoder; import io.netty.handler.codec.http.HttpResponseEncoder; import io.netty.handler.timeout.ReadTimeoutException; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; @@ -512,10 +514,16 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem return; } if (!NetworkExceptionHelper.isCloseConnectionException(cause)) { - logger.warn("caught exception while handling client http traffic, closing connection {}", cause, ctx.channel()); + logger.warn( + (Supplier) () -> new ParameterizedMessage( + "caught exception while handling client http traffic, closing connection {}", ctx.channel()), + cause); ctx.channel().close(); } else { - logger.debug("caught exception while handling client http traffic, closing connection {}", cause, ctx.channel()); + logger.debug( + (Supplier) () -> new ParameterizedMessage( + "caught exception while handling client http traffic, closing connection {}", ctx.channel()), + cause); ctx.channel().close(); } } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4InternalESLogger.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4InternalESLogger.java index 61555294018..aaa277e34b3 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4InternalESLogger.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4InternalESLogger.java @@ -20,14 +20,14 @@ package org.elasticsearch.transport.netty4; import io.netty.util.internal.logging.AbstractInternalLogger; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.SuppressLoggerChecks; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; @SuppressLoggerChecks(reason = "safely delegates to logger") class Netty4InternalESLogger extends AbstractInternalLogger { - private final ESLogger logger; + private final Logger logger; Netty4InternalESLogger(final String name) { super(name); diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4OpenChannelsHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4OpenChannelsHandler.java index 0562a0d4661..2270c90967f 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4OpenChannelsHandler.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4OpenChannelsHandler.java @@ -25,16 +25,14 @@ import io.netty.channel.ChannelFutureListener; import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelInboundHandlerAdapter; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.metrics.CounterMetric; import java.io.IOException; import java.util.Collections; -import java.util.List; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; -import java.util.stream.Collectors; @ChannelHandler.Sharable public class Netty4OpenChannelsHandler extends ChannelInboundHandlerAdapter implements Releasable { @@ -43,9 +41,9 @@ public class Netty4OpenChannelsHandler extends 
ChannelInboundHandlerAdapter impl final CounterMetric openChannelsMetric = new CounterMetric(); final CounterMetric totalChannelsMetric = new CounterMetric(); - final ESLogger logger; + final Logger logger; - public Netty4OpenChannelsHandler(ESLogger logger) { + public Netty4OpenChannelsHandler(Logger logger) { this.logger = logger; } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java index 9de5a31dbe6..b06e5bb48f7 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java @@ -33,12 +33,13 @@ import io.netty.channel.FixedRecvByteBufAllocator; import io.netty.channel.RecvByteBufAllocator; import io.netty.channel.nio.NioEventLoopGroup; import io.netty.channel.oio.OioEventLoopGroup; -import io.netty.channel.socket.SocketChannel; import io.netty.channel.socket.nio.NioServerSocketChannel; import io.netty.channel.socket.nio.NioSocketChannel; import io.netty.channel.socket.oio.OioServerSocketChannel; import io.netty.channel.socket.oio.OioSocketChannel; import io.netty.util.concurrent.Future; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -495,7 +496,9 @@ public class Netty4Transport extends TcpTransport { for (final Tuple> future : serverBootstrapCloseFutures) { future.v2().awaitUninterruptibly(); if (!future.v2().isSuccess()) { - logger.debug("Error closing server bootstrap for profile [{}]", future.v2().cause(), future.v1()); + logger.debug( + (Supplier) () -> new ParameterizedMessage( + "Error closing server bootstrap for profile [{}]", future.v1()), future.v2().cause()); } } serverBootstraps.clear(); diff --git a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/AzureDiscoveryModule.java b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/AzureDiscoveryModule.java index da684fd824d..7ee62dd8776 100644 --- a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/AzureDiscoveryModule.java +++ b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/AzureDiscoveryModule.java @@ -19,13 +19,13 @@ package org.elasticsearch.cloud.azure.classic; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cloud.azure.classic.management.AzureComputeService; import org.elasticsearch.cloud.azure.classic.management.AzureComputeServiceImpl; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -43,7 +43,7 @@ import org.elasticsearch.plugin.discovery.azure.classic.AzureDiscoveryPlugin; * @see AzureComputeServiceImpl */ public class AzureDiscoveryModule extends AbstractModule { - protected final ESLogger logger; + protected final Logger logger; private Settings settings; // pkg private so it is settable by tests @@ -69,7 +69,7 @@ public 
class AzureDiscoveryModule extends AbstractModule { * Check if discovery is meant to start * @return true if we can start discovery features */ - public static boolean isDiscoveryReady(Settings settings, ESLogger logger) { + public static boolean isDiscoveryReady(Settings settings, Logger logger) { // User set discovery.type: azure if (!AzureDiscoveryPlugin.AZURE.equalsIgnoreCase(DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings))) { logger.trace("discovery.type not set to {}", AzureDiscoveryPlugin.AZURE); diff --git a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java index 4c0ac173315..db5c1cc5c42 100644 --- a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java +++ b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java @@ -19,11 +19,11 @@ package org.elasticsearch.plugin.discovery.azure.classic; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cloud.azure.classic.AzureDiscoveryModule; import org.elasticsearch.cloud.azure.classic.management.AzureComputeService; import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -41,7 +41,7 @@ public class AzureDiscoveryPlugin extends Plugin { public static final String AZURE = "azure"; private final Settings settings; - protected final ESLogger logger = Loggers.getLogger(AzureDiscoveryPlugin.class); + protected final Logger logger = Loggers.getLogger(AzureDiscoveryPlugin.class); public AzureDiscoveryPlugin(Settings settings) { this.settings = settings; diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index 506215708e2..9ad2f8d02e2 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -42,6 +42,12 @@ dependencyLicenses { mapping from: /jackson-.*/, to: 'jackson' } +bundlePlugin { + from('config/discovery-ec2') { + into 'config' + } +} + test { // this is needed for insecure plugins, remove if possible! 
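`isDiscoveryReady` shows the simplest shape of the migration: static utilities now accept the Log4j 2 `Logger` of their caller instead of `ESLogger`. A trimmed, hypothetical reduction of such a guard; the real method performs additional checks on the Azure management settings:

```java
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.settings.Settings;

final class DiscoveryGuardSketch {
    // Hypothetical reduction of AzureDiscoveryModule.isDiscoveryReady(Settings, Logger).
    static boolean isDiscoveryReady(Settings settings, Logger logger) {
        if (!"azure".equalsIgnoreCase(settings.get("discovery.type", ""))) {
            logger.trace("discovery.type not set to {}", "azure");
            return false;
        }
        return true;
    }
}
```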
systemProperty 'tests.artifact', project.name diff --git a/plugins/discovery-ec2/config/discovery-ec2/log4j2.properties b/plugins/discovery-ec2/config/discovery-ec2/log4j2.properties new file mode 100644 index 00000000000..aa52f0232e0 --- /dev/null +++ b/plugins/discovery-ec2/config/discovery-ec2/log4j2.properties @@ -0,0 +1,8 @@ +logger.com_amazonaws.name = com.amazonaws +logger.com_amazonaws.level = warn + +logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.name = com.amazonaws.jmx.SdkMBeanRegistrySupport +logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.level = error + +logger.com_amazonaws_metrics_AwsSdkMetrics.name = com.amazonaws.metrics.AwsSdkMetrics +logger.com_amazonaws_metrics_AwsSdkMetrics.level = error diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImpl.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImpl.java index e35b082899e..1a4bf278f3a 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImpl.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImpl.java @@ -30,14 +30,12 @@ import com.amazonaws.internal.StaticCredentialsProvider; import com.amazonaws.retry.RetryPolicy; import com.amazonaws.services.ec2.AmazonEC2; import com.amazonaws.services.ec2.AmazonEC2Client; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.cloud.aws.network.Ec2NameResolver; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; import java.util.Random; @@ -71,7 +69,7 @@ public class AwsEc2ServiceImpl extends AbstractLifecycleComponent implements Aws return this.client; } - protected static AWSCredentialsProvider buildCredentials(ESLogger logger, Settings settings) { + protected static AWSCredentialsProvider buildCredentials(Logger logger, Settings settings) { AWSCredentialsProvider credentials; String key = CLOUD_EC2.KEY_SETTING.get(settings); @@ -87,7 +85,7 @@ public class AwsEc2ServiceImpl extends AbstractLifecycleComponent implements Aws return credentials; } - protected static ClientConfiguration buildConfiguration(ESLogger logger, Settings settings) { + protected static ClientConfiguration buildConfiguration(Logger logger, Settings settings) { ClientConfiguration clientConfiguration = new ClientConfiguration(); // the response metadata cache is only there for diagnostics purposes, // but can force objects from every response to the old generation. 
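The bundled `config/discovery-ec2/log4j2.properties` above quiets the AWS SDK's chattiest loggers, and the `bundlePlugin` block ships it inside the plugin's `config` directory. For orientation, a hypothetical programmatic equivalent of those `logger.<key>.name`/`logger.<key>.level` pairs, written against the log4j-core `Configurator` API (not part of the PR):

```java
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

public class AwsSdkLogLevelsSketch {
    public static void main(String[] args) {
        // Mirrors the three logger definitions in the bundled properties file.
        Configurator.setLevel("com.amazonaws", Level.WARN);
        Configurator.setLevel("com.amazonaws.jmx.SdkMBeanRegistrySupport", Level.ERROR);
        Configurator.setLevel("com.amazonaws.metrics.AwsSdkMetrics", Level.ERROR);
    }
}
```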
@@ -135,7 +133,7 @@ public class AwsEc2ServiceImpl extends AbstractLifecycleComponent implements Aws return clientConfiguration; } - protected static String findEndpoint(ESLogger logger, Settings settings) { + protected static String findEndpoint(Logger logger, Settings settings) { String endpoint = null; if (CLOUD_EC2.ENDPOINT_SETTING.exists(settings)) { endpoint = CLOUD_EC2.ENDPOINT_SETTING.get(settings); diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsSigner.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsSigner.java index a76a2b04a91..11732725e9d 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsSigner.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsSigner.java @@ -21,12 +21,12 @@ package org.elasticsearch.cloud.aws; import com.amazonaws.ClientConfiguration; import com.amazonaws.auth.SignerFactory; -import org.elasticsearch.common.logging.ESLogger; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.Loggers; public class AwsSigner { - private static final ESLogger logger = Loggers.getLogger(AwsSigner.class); + private static final Logger logger = Loggers.getLogger(AwsSigner.class); private AwsSigner() { diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java index 1f3043fe6dd..6d4fcdc4c8d 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java @@ -27,6 +27,8 @@ import com.amazonaws.services.ec2.model.Filter; import com.amazonaws.services.ec2.model.GroupIdentifier; import com.amazonaws.services.ec2.model.Instance; import com.amazonaws.services.ec2.model.Reservation; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.Version; import org.elasticsearch.cloud.aws.AwsEc2Service; import org.elasticsearch.cloud.aws.AwsEc2Service.DISCOVERY_EC2; @@ -175,7 +177,10 @@ public class AwsEc2UnicastHostsProvider extends AbstractComponent implements Uni emptyMap(), emptySet(), Version.CURRENT.minimumCompatibilityVersion())); } } catch (Exception e) { - logger.warn("failed ot add {}, address {}", e, instance.getInstanceId(), address); + final String finalAddress = address; + logger.warn( + (Supplier) + () -> new ParameterizedMessage("failed to add {}, address {}", instance.getInstanceId(), finalAddress), e); } } else { logger.trace("not adding {}, address is null, host_type {}", instance.getInstanceId(), hostType); diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/plugin/discovery/ec2/Ec2DiscoveryPlugin.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/plugin/discovery/ec2/Ec2DiscoveryPlugin.java index 346372f554e..7f8e983e52b 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/plugin/discovery/ec2/Ec2DiscoveryPlugin.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/plugin/discovery/ec2/Ec2DiscoveryPlugin.java @@ -19,6 +19,25 @@ package org.elasticsearch.plugin.discovery.ec2; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.SpecialPermission; +import org.elasticsearch.cloud.aws.AwsEc2Service; +import org.elasticsearch.cloud.aws.AwsEc2ServiceImpl; +import 
org.elasticsearch.cloud.aws.Ec2Module; +import org.elasticsearch.cloud.aws.network.Ec2NameResolver; +import org.elasticsearch.common.component.LifecycleComponent; +import org.elasticsearch.common.inject.Module; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.DiscoveryModule; +import org.elasticsearch.discovery.ec2.AwsEc2UnicastHostsProvider; +import org.elasticsearch.discovery.zen.ZenDiscovery; +import org.elasticsearch.node.Node; +import org.elasticsearch.plugins.DiscoveryPlugin; +import org.elasticsearch.plugins.Plugin; + import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; @@ -34,32 +53,12 @@ import java.util.Arrays; import java.util.Collection; import java.util.List; -import org.elasticsearch.SpecialPermission; -import org.elasticsearch.cloud.aws.AwsEc2Service; -import org.elasticsearch.cloud.aws.AwsEc2ServiceImpl; -import org.elasticsearch.cloud.aws.Ec2Module; -import org.elasticsearch.cloud.aws.network.Ec2NameResolver; -import org.elasticsearch.common.component.LifecycleComponent; -import org.elasticsearch.common.inject.Module; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.network.NetworkModule; -import org.elasticsearch.common.network.NetworkService; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.discovery.DiscoveryModule; -import org.elasticsearch.discovery.ec2.AwsEc2UnicastHostsProvider; -import org.elasticsearch.discovery.zen.ZenDiscovery; -import org.elasticsearch.node.Node; -import org.elasticsearch.plugins.DiscoveryPlugin; -import org.elasticsearch.plugins.Plugin; - /** * */ public class Ec2DiscoveryPlugin extends Plugin implements DiscoveryPlugin { - private static ESLogger logger = Loggers.getLogger(Ec2DiscoveryPlugin.class); + private static Logger logger = Loggers.getLogger(Ec2DiscoveryPlugin.class); public static final String EC2 = "ec2"; diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java index 49f4f885419..050a25bb18d 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java @@ -373,7 +373,7 @@ import com.amazonaws.services.ec2.model.TerminateInstancesResult; import com.amazonaws.services.ec2.model.UnassignPrivateIpAddressesRequest; import com.amazonaws.services.ec2.model.UnmonitorInstancesRequest; import com.amazonaws.services.ec2.model.UnmonitorInstancesResult; -import org.elasticsearch.common.logging.ESLogger; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.ESLoggerFactory; import java.util.ArrayList; @@ -386,7 +386,7 @@ import java.util.regex.Pattern; public class AmazonEC2Mock implements AmazonEC2 { - private static final ESLogger logger = ESLoggerFactory.getLogger(AmazonEC2Mock.class.getName()); + private static final Logger logger = ESLoggerFactory.getLogger(AmazonEC2Mock.class.getName()); public static final String PREFIX_PRIVATE_IP = "10.0.0."; public static final String PREFIX_PUBLIC_IP = "8.8.8."; diff --git 
a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java index 5ec4b18e910..c6c7b9a0aef 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java @@ -28,6 +28,8 @@ import com.google.api.client.json.jackson2.JacksonFactory; import com.google.api.services.compute.Compute; import com.google.api.services.compute.model.Instance; import com.google.api.services.compute.model.InstanceList; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.SpecialPermission; import org.elasticsearch.common.component.AbstractLifecycleComponent; @@ -82,7 +84,7 @@ public class GceInstancesServiceImpl extends AbstractLifecycleComponent implemen return instanceList.isEmpty() || instanceList.getItems() == null ? Collections.emptyList() : instanceList.getItems(); } catch (PrivilegedActionException e) { - logger.warn("Problem fetching instance list for zone {}", e, zoneId); + logger.warn((Supplier) () -> new ParameterizedMessage("Problem fetching instance list for zone {}", zoneId), e); logger.debug("Full exception:", e); // assist type inference return Collections.emptyList(); diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceModule.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceModule.java index 81d10c756e5..71e9fbc7804 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceModule.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceModule.java @@ -19,8 +19,8 @@ package org.elasticsearch.cloud.gce; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; @@ -29,7 +29,7 @@ public class GceModule extends AbstractModule { static Class computeServiceImpl = GceInstancesServiceImpl.class; protected final Settings settings; - protected final ESLogger logger = Loggers.getLogger(GceModule.class); + protected final Logger logger = Loggers.getLogger(GceModule.class); public GceModule(Settings settings) { this.settings = settings; diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java index c73df8f8395..3426e74d4a4 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java @@ -22,6 +22,8 @@ package org.elasticsearch.discovery.gce; import com.google.api.services.compute.model.AccessConfig; import com.google.api.services.compute.model.Instance; import com.google.api.services.compute.model.NetworkInterface; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.Version; import org.elasticsearch.cloud.gce.GceInstancesService; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -245,7 +247,8 @@ public class GceUnicastHostsProvider extends 
AbstractComponent implements Unicas } } } catch (Exception e) { - logger.warn("failed to add {}, address {}", e, name, ip_private); + final String finalIpPrivate = ip_private; + logger.warn((Supplier) () -> new ParameterizedMessage("failed to add {}, address {}", name, finalIpPrivate), e); } } diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/RetryHttpInitializerWrapper.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/RetryHttpInitializerWrapper.java index b21d397d78a..c005aa05a78 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/RetryHttpInitializerWrapper.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/RetryHttpInitializerWrapper.java @@ -29,8 +29,8 @@ import com.google.api.client.http.HttpResponse; import com.google.api.client.http.HttpUnsuccessfulResponseHandler; import com.google.api.client.util.ExponentialBackOff; import com.google.api.client.util.Sleeper; +import org.apache.logging.log4j.Logger; import org.elasticsearch.SpecialPermission; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.unit.TimeValue; @@ -43,8 +43,7 @@ public class RetryHttpInitializerWrapper implements HttpRequestInitializer { private TimeValue maxWait; - private static final ESLogger logger = - ESLoggerFactory.getLogger(RetryHttpInitializerWrapper.class.getName()); + private static final Logger logger = ESLoggerFactory.getLogger(RetryHttpInitializerWrapper.class.getName()); // Intercepts the request for filling in the "Authorization" // header field, as well as recovering from certain unsuccessful diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java index 031f7eaf10f..aeec9911824 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java @@ -21,6 +21,7 @@ package org.elasticsearch.plugin.discovery.gce; import com.google.api.client.http.HttpHeaders; import com.google.api.client.util.ClassInfo; +import org.apache.logging.log4j.Logger; import org.elasticsearch.SpecialPermission; import org.elasticsearch.cloud.gce.GceInstancesService; import org.elasticsearch.cloud.gce.GceMetadataService; @@ -28,7 +29,6 @@ import org.elasticsearch.cloud.gce.GceModule; import org.elasticsearch.cloud.gce.network.GceNameResolver; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.inject.Module; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Setting; @@ -51,7 +51,7 @@ public class GceDiscoveryPlugin extends Plugin implements DiscoveryPlugin { public static final String GCE = "gce"; private final Settings settings; - protected final ESLogger logger = Loggers.getLogger(GceDiscoveryPlugin.class); + protected final Logger logger = Loggers.getLogger(GceDiscoveryPlugin.class); static { /* diff --git a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java index adb06f25958..98f6fd0dc1b 100644 --- 
a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java +++ b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java @@ -23,11 +23,11 @@ import com.sun.net.httpserver.Headers; import com.sun.net.httpserver.HttpServer; import com.sun.net.httpserver.HttpsConfigurator; import com.sun.net.httpserver.HttpsServer; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cloud.gce.GceInstancesServiceImpl; import org.elasticsearch.cloud.gce.GceMetadataService; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.FileSystemUtils; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -40,6 +40,7 @@ import org.junit.BeforeClass; import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.SSLContext; import javax.net.ssl.TrustManagerFactory; + import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -125,7 +126,7 @@ public class GceDiscoverTests extends ESIntegTestCase { httpsServer.createContext("/compute/v1/projects/testproject/zones/primaryzone/instances", (s) -> { Headers headers = s.getResponseHeaders(); headers.add("Content-Type", "application/json; charset=UTF-8"); - ESLogger logger = Loggers.getLogger(GceDiscoverTests.class); + Logger logger = Loggers.getLogger(GceDiscoverTests.class); try { Path[] files = FileSystemUtils.files(logDir); StringBuilder builder = new StringBuilder("{\"id\": \"dummy\",\"items\":["); diff --git a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceMockUtils.java b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceMockUtils.java index 88a6fbd9e92..01009554535 100644 --- a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceMockUtils.java +++ b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceMockUtils.java @@ -26,9 +26,9 @@ import com.google.api.client.json.Json; import com.google.api.client.testing.http.MockHttpTransport; import com.google.api.client.testing.http.MockLowLevelHttpRequest; import com.google.api.client.testing.http.MockLowLevelHttpResponse; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.Streams; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.util.Callback; @@ -37,7 +37,7 @@ import java.io.InputStream; import java.net.URL; public class GceMockUtils { - protected static final ESLogger logger = Loggers.getLogger(GceMockUtils.class); + protected static final Logger logger = Loggers.getLogger(GceMockUtils.class); public static final String GCE_METADATA_URL = "http://metadata.google.internal/computeMetadata/v1/instance"; diff --git a/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineService.java b/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineService.java index 3477f62a5bc..da3d14cd02b 100644 --- a/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineService.java +++ b/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineService.java @@ -25,6 +25,7 @@ import org.elasticsearch.SpecialPermission; import org.elasticsearch.bootstrap.BootstrapInfo; import 
org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.script.ClassPermission; import org.elasticsearch.script.CompiledScript; @@ -138,6 +139,8 @@ public class JavaScriptScriptEngineService extends AbstractComponent implements public JavaScriptScriptEngineService(Settings settings) { super(settings); + deprecationLogger.deprecated("[javascript] scripts are deprecated, use [painless] scripts instead"); + Context ctx = Context.enter(); try { globalScope = ctx.initStandardObjects(null, true); @@ -173,6 +176,8 @@ public class JavaScriptScriptEngineService extends AbstractComponent implements @Override public ExecutableScript executable(CompiledScript compiledScript, @Nullable Map vars) { + deprecationLogger.deprecated("[javascript] scripts are deprecated, use [painless] scripts instead"); + Context ctx = Context.enter(); try { Scriptable scope = ctx.newObject(globalScope); @@ -192,6 +197,8 @@ public class JavaScriptScriptEngineService extends AbstractComponent implements @Override public SearchScript search(final CompiledScript compiledScript, final SearchLookup lookup, @Nullable final Map vars) { + deprecationLogger.deprecated("[javascript] scripts are deprecated, use [painless] scripts instead"); + Context ctx = Context.enter(); try { final Scriptable scope = ctx.newObject(globalScope); diff --git a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptMultiThreadedTests.java b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptMultiThreadedTests.java index 634a4ca6dfa..c3614952ecf 100644 --- a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptMultiThreadedTests.java +++ b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptMultiThreadedTests.java @@ -41,7 +41,7 @@ public class JavaScriptScriptMultiThreadedTests extends ESTestCase { final Object compiled = se.compile(null, "x + y", Collections.emptyMap()); final AtomicBoolean failed = new AtomicBoolean(); - Thread[] threads = new Thread[50]; + Thread[] threads = new Thread[between(3, 12)]; final CountDownLatch latch = new CountDownLatch(threads.length); final CyclicBarrier barrier = new CyclicBarrier(threads.length + 1); for (int i = 0; i < threads.length; i++) { @@ -57,7 +57,7 @@ public class JavaScriptScriptMultiThreadedTests extends ESTestCase { vars.put("x", x); vars.put("y", y); ExecutableScript script = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testExecutableNoRuntimeParams", "js", compiled), vars); - for (int i = 0; i < 100000; i++) { + for (int i = 0; i < between(100, 1000); i++) { long result = ((Number) script.run()).longValue(); assertThat(result, equalTo(addition)); } @@ -83,7 +83,7 @@ public class JavaScriptScriptMultiThreadedTests extends ESTestCase { final Object compiled = se.compile(null, "x + y", Collections.emptyMap()); final AtomicBoolean failed = new AtomicBoolean(); - Thread[] threads = new Thread[50]; + Thread[] threads = new Thread[between(3, 12)]; final CountDownLatch latch = new CountDownLatch(threads.length); final CyclicBarrier barrier = new CyclicBarrier(threads.length + 1); for (int i = 0; i < threads.length; i++) { @@ -96,7 +96,7 @@ public class JavaScriptScriptMultiThreadedTests extends ESTestCase { Map vars = new HashMap(); vars.put("x", x); ExecutableScript 
script = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testExecutableNoRuntimeParams", "js", compiled), vars); - for (int i = 0; i < 100000; i++) { + for (int i = 0; i < between(100, 1000); i++) { long y = Randomness.get().nextInt(); long addition = x + y; script.setNextVar("y", y); @@ -125,7 +125,7 @@ public class JavaScriptScriptMultiThreadedTests extends ESTestCase { final Object compiled = se.compile(null, "x + y", Collections.emptyMap()); final AtomicBoolean failed = new AtomicBoolean(); - Thread[] threads = new Thread[50]; + Thread[] threads = new Thread[between(3, 12)]; final CountDownLatch latch = new CountDownLatch(threads.length); final CyclicBarrier barrier = new CyclicBarrier(threads.length + 1); for (int i = 0; i < threads.length; i++) { @@ -135,7 +135,7 @@ public class JavaScriptScriptMultiThreadedTests extends ESTestCase { try { barrier.await(); Map runtimeVars = new HashMap(); - for (int i = 0; i < 100000; i++) { + for (int i = 0; i < between(100, 1000); i++) { long x = Randomness.get().nextInt(); long y = Randomness.get().nextInt(); long addition = x + y; diff --git a/plugins/lang-python/src/main/java/org/elasticsearch/script/python/PythonScriptEngineService.java b/plugins/lang-python/src/main/java/org/elasticsearch/script/python/PythonScriptEngineService.java index 5a16c06d4dc..d31e691b994 100644 --- a/plugins/lang-python/src/main/java/org/elasticsearch/script/python/PythonScriptEngineService.java +++ b/plugins/lang-python/src/main/java/org/elasticsearch/script/python/PythonScriptEngineService.java @@ -62,6 +62,8 @@ public class PythonScriptEngineService extends AbstractComponent implements Scri public PythonScriptEngineService(Settings settings) { super(settings); + deprecationLogger.deprecated("[python] scripts are deprecated, use [painless] scripts instead."); + // classloader created here final SecurityManager sm = System.getSecurityManager(); if (sm != null) { @@ -118,11 +120,15 @@ public class PythonScriptEngineService extends AbstractComponent implements Scri @Override public ExecutableScript executable(CompiledScript compiledScript, Map vars) { + deprecationLogger.deprecated("[python] scripts are deprecated, use [painless] scripts instead"); + return new PythonExecutableScript((PyCode) compiledScript.compiled(), vars); } @Override public SearchScript search(final CompiledScript compiledScript, final SearchLookup lookup, @Nullable final Map vars) { + deprecationLogger.deprecated("[python] scripts are deprecated, use [painless] scripts instead"); + return new SearchScript() { @Override public LeafSearchScript getLeafSearchScript(LeafReaderContext context) throws IOException { diff --git a/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java b/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java index a7d8228397e..1031f0d1034 100644 --- a/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java +++ b/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java @@ -19,6 +19,7 @@ package org.elasticsearch.mapper.attachments; +import org.apache.logging.log4j.Logger; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.search.Query; @@ -26,7 +27,6 @@ import org.apache.tika.language.LanguageIdentifier; import org.apache.tika.metadata.Metadata; import org.elasticsearch.Version; import 
org.elasticsearch.common.collect.Iterators; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -75,7 +75,7 @@ import static org.elasticsearch.index.mapper.TypeParsers.parseMultiField; */ public class AttachmentMapper extends FieldMapper { - private static ESLogger logger = ESLoggerFactory.getLogger("mapper.attachment"); + private static Logger logger = ESLoggerFactory.getLogger("mapper.attachment"); public static final Setting INDEX_ATTACHMENT_IGNORE_ERRORS_SETTING = Setting.boolSetting("index.mapping.attachment.ignore_errors", true, Property.IndexScope); public static final Setting INDEX_ATTACHMENT_DETECT_LANGUAGE_SETTING = @@ -653,4 +653,5 @@ public class AttachmentMapper extends FieldMapper { protected String contentType() { return CONTENT_TYPE; } + } diff --git a/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/MapperAttachmentsPlugin.java b/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/MapperAttachmentsPlugin.java index 6cf957f05c0..36b12ec0f42 100644 --- a/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/MapperAttachmentsPlugin.java +++ b/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/MapperAttachmentsPlugin.java @@ -19,22 +19,22 @@ package org.elasticsearch.mapper.attachments; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Map; - +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.Plugin; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; + public class MapperAttachmentsPlugin extends Plugin implements MapperPlugin { - private static ESLogger logger = ESLoggerFactory.getLogger("mapper.attachment"); + private static Logger logger = ESLoggerFactory.getLogger("mapper.attachment"); private static DeprecationLogger deprecationLogger = new DeprecationLogger(logger); @Override @@ -50,4 +50,5 @@ public class MapperAttachmentsPlugin extends Plugin implements MapperPlugin { public Map getMappers() { return Collections.singletonMap("attachment", new AttachmentMapper.TypeParser()); } + } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java index 8cb7b9085e4..05bb911476a 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java @@ -20,12 +20,12 @@ package org.elasticsearch.cloud.azure.blobstore; import com.microsoft.azure.storage.StorageException; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.support.AbstractBlobContainer; import org.elasticsearch.common.io.Streams; 
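The javascript, python, and mapper-attachments hunks route user-facing warnings through `DeprecationLogger`, which after this migration wraps the Log4j 2 `Logger` directly (see `new DeprecationLogger(logger)` in `MapperAttachmentsPlugin`). A minimal sketch of the pattern, with an assumed class name and call site:

```java
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;

final class DeprecatedEngineSketch {
    private static final Logger logger = Loggers.getLogger(DeprecatedEngineSketch.class);
    private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger);

    void compile() {
        // Writes a WARN entry to the dedicated deprecation log appender.
        deprecationLogger.deprecated("[javascript] scripts are deprecated, use [painless] scripts instead");
    }
}
```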
-import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.repositories.RepositoryException; @@ -43,7 +43,7 @@ import java.util.Map; */ public class AzureBlobContainer extends AbstractBlobContainer { - protected final ESLogger logger = Loggers.getLogger(AzureBlobContainer.class); + protected final Logger logger = Loggers.getLogger(AzureBlobContainer.class); protected final AzureBlobStore blobStore; protected final String keyPath; diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java index 4e5dfb3efd5..cd201e7ff56 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java @@ -27,6 +27,8 @@ import com.microsoft.azure.storage.blob.CloudBlobClient; import com.microsoft.azure.storage.blob.CloudBlobContainer; import com.microsoft.azure.storage.blob.CloudBlockBlob; import com.microsoft.azure.storage.blob.ListBlobItem; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; @@ -172,7 +174,7 @@ public class AzureStorageServiceImpl extends AbstractComponent implements AzureS logger.trace("creating container [{}]", container); blobContainer.createIfNotExists(); } catch (IllegalArgumentException e) { - logger.trace("fails creating container [{}]", e, container); + logger.trace((Supplier) () -> new ParameterizedMessage("fails creating container [{}]", container), e); throw new RepositoryException(container, e.getMessage()); } } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java b/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java index fcd7bf96b2c..5b938fce188 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java @@ -19,15 +19,8 @@ package org.elasticsearch.plugin.repository.azure; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Map; - import org.elasticsearch.cloud.azure.storage.AzureStorageService; import org.elasticsearch.cloud.azure.storage.AzureStorageServiceImpl; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; @@ -36,6 +29,11 @@ import org.elasticsearch.plugins.RepositoryPlugin; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.azure.AzureRepository; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; + /** * A plugin to add a repository type that writes to and from the Azure cloud storage service. 
*/ diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 544e0407738..8cfb5043b6c 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -362,4 +362,9 @@ thirdPartyAudit.excludes = [ // optional dependencies of slf4j-api 'org.slf4j.impl.StaticMDCBinder', 'org.slf4j.impl.StaticMarkerBinder', + + 'org.apache.log4j.AppenderSkeleton', + 'org.apache.log4j.AsyncAppender', + 'org.apache.log4j.helpers.ISO8601DateFormat', + 'org.apache.log4j.spi.ThrowableInformation' ] diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index a6610178ce8..b1369908670 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -48,6 +48,12 @@ dependencyLicenses { mapping from: /jaxb-.*/, to: 'jaxb' } +bundlePlugin { + from('config/repository-s3') { + into 'config' + } +} + test { // this is needed for insecure plugins, remove if possible! systemProperty 'tests.artifact', project.name diff --git a/plugins/repository-s3/config/repository-s3/log4j2.properties b/plugins/repository-s3/config/repository-s3/log4j2.properties new file mode 100644 index 00000000000..3fee57ce3e2 --- /dev/null +++ b/plugins/repository-s3/config/repository-s3/log4j2.properties @@ -0,0 +1,8 @@ +logger.com_amazonaws.name = com.amazonaws +logger.com_amazonaws.level = warn + +logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.name = com.amazonaws.jmx.SdkMBeanRegistrySupport +logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.level = error + +logger.com_amazonaws_metrics_AwsSdkMetrics.name = com.amazonaws.metrics.AwsSdkMetrics +logger.com_amazonaws_metrics_AwsSdkMetrics.level = error diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsSigner.java b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsSigner.java index 5c02671e5e9..c1c36031b5e 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsSigner.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsSigner.java @@ -21,12 +21,12 @@ package org.elasticsearch.cloud.aws; import com.amazonaws.ClientConfiguration; import com.amazonaws.auth.SignerFactory; -import org.elasticsearch.common.logging.ESLogger; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.Loggers; public class AwsSigner { - private static final ESLogger logger = Loggers.getLogger(AwsSigner.class); + private static final Logger logger = Loggers.getLogger(AwsSigner.class); private AwsSigner() { diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java index c4d8a63adc6..a9091788f28 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java @@ -29,11 +29,11 @@ import com.amazonaws.internal.StaticCredentialsProvider; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3Client; import com.amazonaws.services.s3.S3ClientOptions; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; import
org.elasticsearch.repositories.s3.S3Repository; @@ -85,7 +85,7 @@ public class InternalAwsS3Service extends AbstractLifecycleComponent implements return client; } - public static ClientConfiguration buildConfiguration(ESLogger logger, Settings settings, Protocol protocol, Integer maxRetries, + public static ClientConfiguration buildConfiguration(Logger logger, Settings settings, Protocol protocol, Integer maxRetries, String endpoint, boolean useThrottleRetries) { ClientConfiguration clientConfiguration = new ClientConfiguration(); // the response metadata cache is only there for diagnostics purposes, @@ -122,7 +122,7 @@ public class InternalAwsS3Service extends AbstractLifecycleComponent implements return clientConfiguration; } - public static AWSCredentialsProvider buildCredentials(ESLogger logger, Settings settings, Settings repositorySettings) { + public static AWSCredentialsProvider buildCredentials(Logger logger, Settings settings, Settings repositorySettings) { AWSCredentialsProvider credentials; String key = getValue(repositorySettings, settings, S3Repository.Repository.KEY_SETTING, S3Repository.Repositories.KEY_SETTING); @@ -140,7 +140,7 @@ public class InternalAwsS3Service extends AbstractLifecycleComponent implements return credentials; } - protected static String findEndpoint(ESLogger logger, Settings settings, String endpoint, String region) { + protected static String findEndpoint(Logger logger, Settings settings, String endpoint, String region) { if (Strings.isNullOrEmpty(endpoint)) { logger.debug("no repository level endpoint has been defined. Trying to guess from repository region [{}]", region); if (!region.isEmpty()) { diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/blobstore/DefaultS3OutputStream.java b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/blobstore/DefaultS3OutputStream.java index dd278a9231d..ef9b25b2d1e 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/blobstore/DefaultS3OutputStream.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/blobstore/DefaultS3OutputStream.java @@ -31,7 +31,7 @@ import com.amazonaws.services.s3.model.PutObjectResult; import com.amazonaws.services.s3.model.UploadPartRequest; import com.amazonaws.services.s3.model.UploadPartResult; import com.amazonaws.util.Base64; -import org.elasticsearch.common.logging.ESLogger; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -64,7 +64,7 @@ import java.util.List; public class DefaultS3OutputStream extends S3OutputStream { private static final ByteSizeValue MULTIPART_MAX_SIZE = new ByteSizeValue(5, ByteSizeUnit.GB); - private static final ESLogger logger = Loggers.getLogger("cloud.aws"); + private static final Logger logger = Loggers.getLogger("cloud.aws"); /** * Multipart Upload API data */ diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/TestAmazonS3.java b/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/TestAmazonS3.java index 31682ee4de6..37087db386b 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/TestAmazonS3.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/TestAmazonS3.java @@ -28,8 +28,8 @@ import com.amazonaws.services.s3.model.PutObjectResult; import com.amazonaws.services.s3.model.S3Object; import com.amazonaws.services.s3.model.UploadPartRequest; 
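The `InternalAwsS3Service` helpers keep their logic; only the logger parameter type changes from `ESLogger` to `Logger`. A trimmed, hypothetical variant showing the post-migration shape (the parameter list and settings handling are simplified relative to the real methods):

```java
import com.amazonaws.ClientConfiguration;
import com.amazonaws.Protocol;
import org.apache.logging.log4j.Logger;

final class S3ClientConfigSketch {
    // Hypothetical reduction of InternalAwsS3Service.buildConfiguration(Logger, ...).
    static ClientConfiguration buildConfiguration(Logger logger, Protocol protocol, Integer maxRetries) {
        final ClientConfiguration configuration = new ClientConfiguration();
        configuration.setProtocol(protocol);
        if (maxRetries != null) {
            configuration.setMaxErrorRetry(maxRetries);
        }
        logger.debug("using protocol [{}], max retries [{}]", protocol, maxRetries);
        return configuration;
    }
}
```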
import com.amazonaws.services.s3.model.UploadPartResult; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; @@ -49,7 +49,7 @@ import static com.carrotsearch.randomizedtesting.RandomizedTest.randomDouble; */ public class TestAmazonS3 extends AmazonS3Wrapper { - protected final ESLogger logger = Loggers.getLogger(getClass()); + protected final Logger logger = Loggers.getLogger(getClass()); private double writeFailureRate = 0.0; private double readFailureRate = 0.0; diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java index d9d15ce0b3b..4d7f30ed9db 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java @@ -24,6 +24,8 @@ import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.model.DeleteObjectsRequest; import com.amazonaws.services.s3.model.ObjectListing; import com.amazonaws.services.s3.model.S3ObjectSummary; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; @@ -511,7 +513,7 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase client.deleteObjects(multiObjectDeleteRequest); } } catch (Exception ex) { - logger.warn("Failed to delete S3 repository [{}] in [{}]", ex, bucketName, region); + logger.warn((Supplier) () -> new ParameterizedMessage("Failed to delete S3 repository [{}] in [{}]", bucketName, region), ex); } } } diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java new file mode 100644 index 00000000000..4081eab1f2c --- /dev/null +++ b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java @@ -0,0 +1,126 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.logging; + +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.Appender; +import org.apache.logging.log4j.core.LoggerContext; +import org.apache.logging.log4j.core.config.Configuration; +import org.apache.logging.log4j.core.config.Configurator; +import org.apache.logging.log4j.core.config.LoggerConfig; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.nio.file.Path; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; + +public class EvilLoggerConfigurationTests extends ESTestCase { + + public void testResolveMultipleConfigs() throws Exception { + final Level level = ESLoggerFactory.getLogger("test").getLevel(); + try { + final Path configDir = getDataPath("config"); + final Settings settings = Settings.builder() + .put(Environment.PATH_CONF_SETTING.getKey(), configDir.toAbsolutePath()) + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + final Environment environment = new Environment(settings); + LogConfigurator.configure(environment, true); + + { + final LoggerContext ctx = (LoggerContext) LogManager.getContext(false); + final Configuration config = ctx.getConfiguration(); + final LoggerConfig loggerConfig = config.getLoggerConfig("test"); + final Appender appender = loggerConfig.getAppenders().get("console"); + assertThat(appender, notNullValue()); + } + + { + final LoggerContext ctx = (LoggerContext) LogManager.getContext(false); + final Configuration config = ctx.getConfiguration(); + final LoggerConfig loggerConfig = config.getLoggerConfig("second"); + final Appender appender = loggerConfig.getAppenders().get("console2"); + assertThat(appender, notNullValue()); + } + + { + final LoggerContext ctx = (LoggerContext) LogManager.getContext(false); + final Configuration config = ctx.getConfiguration(); + final LoggerConfig loggerConfig = config.getLoggerConfig("third"); + final Appender appender = loggerConfig.getAppenders().get("console3"); + assertThat(appender, notNullValue()); + } + } finally { + Configurator.setLevel("test", level); + } + } + + public void testDefaults() throws IOException { + final Path configDir = getDataPath("config"); + final String level = randomFrom(Level.values()).toString(); + final Settings settings = Settings.builder() + .put(Environment.PATH_CONF_SETTING.getKey(), configDir.toAbsolutePath()) + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .put("logger.level", level) + .build(); + final Environment environment = new Environment(settings); + LogConfigurator.configure(environment, true); + + final String loggerName; + if (LogManager.getContext(false).hasLogger("org.elasticsearch.test", new PrefixMessageFactory())) { + loggerName = "org.elasticsearch.test"; + } else { + assertTrue(LogManager.getContext(false).hasLogger("test", new PrefixMessageFactory())); + loggerName = "test"; + } + final Logger logger = ESLoggerFactory.getLogger(loggerName); + assertThat(logger.getLevel().toString(), equalTo(level)); + } + + // tests that custom settings are not overwritten by settings in the config file + public void testResolveOrder() throws Exception { + final Path configDir = getDataPath("config"); + final Settings settings = Settings.builder() + 
.put(Environment.PATH_CONF_SETTING.getKey(), configDir.toAbsolutePath()) + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .put("logger.test_resolve_order", "TRACE") + .build(); + final Environment environment = new Environment(settings); + LogConfigurator.configure(environment, true); + + // args should overwrite whatever is in the config + final String loggerName; + if (LogManager.getContext(false).hasLogger("org.elasticsearch.test_resolve_order", new PrefixMessageFactory())) { + loggerName = "org.elasticsearch.test_resolve_order"; + } else { + assertTrue(LogManager.getContext(false).hasLogger("test_resolve_order", new PrefixMessageFactory())); + loggerName = "test_resolve_order"; + } + final Logger logger = ESLoggerFactory.getLogger(loggerName); + assertTrue(logger.isTraceEnabled()); + } + +} diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java new file mode 100644 index 00000000000..168453df763 --- /dev/null +++ b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java @@ -0,0 +1,113 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.logging; + +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.Version; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.hamcrest.RegexMatcher; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import static org.hamcrest.Matchers.equalTo; + +public class EvilLoggerTests extends ESTestCase { + + private Logger testLogger; + private DeprecationLogger deprecationLogger; + + @Override + public void setUp() throws Exception { + super.setUp(); + + final Path configDir = getDataPath("config"); + // need to set custom path.conf so we can use a custom log4j2.properties file for the test + final Settings settings = Settings.builder() + .put(Environment.PATH_CONF_SETTING.getKey(), configDir.toAbsolutePath()) + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + final Environment environment = new Environment(settings); + LogConfigurator.configure(environment, true); + + testLogger = ESLoggerFactory.getLogger("test"); + deprecationLogger = ESLoggerFactory.getDeprecationLogger("test"); + } + + public void testLocationInfoTest() throws IOException { + testLogger.error("This is an error message"); + testLogger.warn("This is a warning message"); + testLogger.info("This is an info message"); + testLogger.debug("This is a debug message"); + testLogger.trace("This is a trace message"); + final String path = System.getProperty("es.logs") + ".log"; + final List<String> events = Files.readAllLines(PathUtils.get(path)); + assertThat(events.size(), equalTo(6)); // the five messages we log plus a warning for unsupported configuration files + final String location = "org.elasticsearch.common.logging.EvilLoggerTests.testLocationInfoTest"; + // the first message is a warning for unsupported configuration files + assertLogLine(events.get(1), Level.ERROR, location, "This is an error message"); + assertLogLine(events.get(2), Level.WARN, location, "This is a warning message"); + assertLogLine(events.get(3), Level.INFO, location, "This is an info message"); + assertLogLine(events.get(4), Level.DEBUG, location, "This is a debug message"); + assertLogLine(events.get(5), Level.TRACE, location, "This is a trace message"); + } + + private void assertLogLine(final String logLine, final Level level, final String location, final String message) { + final Matcher matcher = Pattern.compile("\\[(.*)\\]\\[(.*)\\(.*\\)\\] (.*)").matcher(logLine); + assertTrue(logLine, matcher.matches()); + assertThat(matcher.group(1), equalTo(level.toString())); + assertThat(matcher.group(2), RegexMatcher.matches(location)); + assertThat(matcher.group(3), RegexMatcher.matches(message)); + } + + public void testDeprecationLogger() throws IOException { + deprecationLogger.deprecated("This is a deprecation message"); + final String deprecationPath = System.getProperty("es.logs") + "_deprecation.log"; + final List<String> deprecationEvents = Files.readAllLines(PathUtils.get(deprecationPath)); + assertThat(deprecationEvents.size(), equalTo(1)); + assertLogLine( + deprecationEvents.get(0), + Level.WARN, + "org.elasticsearch.common.logging.DeprecationLogger.deprecated", + "This is a deprecation message"); + } + + public void 
testUnsupportedLoggingConfigurationFiles() throws IOException { + // TODO: the warning for unsupported logging configurations can be removed in 6.0.0 + assert Version.CURRENT.major < 6; + final String path = System.getProperty("es.logs") + ".log"; + final List<String> events = Files.readAllLines(PathUtils.get(path)); + assertThat(events.size(), equalTo(1)); + assertLogLine( + events.get(0), + Level.WARN, + "org\\.elasticsearch\\.common\\.logging\\.LogConfigurator.*", + "ignoring unsupported logging configuration file \\[.*\\], logging is configured via \\[.*\\]"); + } + +} diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java index e2910be64f0..ab4f00492b0 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java @@ -19,6 +19,14 @@ package org.elasticsearch.plugins; +import org.apache.lucene.util.LuceneTestCase; +import org.elasticsearch.cli.MockTerminal; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; + import java.io.IOException; import java.nio.file.DirectoryStream; import java.nio.file.Files; @@ -26,13 +34,8 @@ import java.nio.file.Path; import java.util.HashMap; import java.util.Map; -import org.apache.lucene.util.LuceneTestCase; -import org.elasticsearch.cli.UserException; -import org.elasticsearch.cli.MockTerminal; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; -import org.elasticsearch.test.ESTestCase; -import org.junit.Before; +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.not; @LuceneTestCase.SuppressFileSystems("*") public class RemovePluginCommandTests extends ESTestCase { @@ -109,4 +112,26 @@ public class RemovePluginCommandTests extends ESTestCase { assertRemoveCleaned(env); } + public void testConfigDirPreserved() throws Exception { + Files.createDirectories(env.pluginsFile().resolve("fake")); + final Path configDir = env.configFile().resolve("fake"); + Files.createDirectories(configDir); + Files.createFile(configDir.resolve("fake.yml")); + final MockTerminal terminal = removePlugin("fake", home); + assertTrue(Files.exists(env.configFile().resolve("fake"))); + assertThat(terminal.getOutput(), containsString(expectedConfigDirPreservedMessage(configDir))); + assertRemoveCleaned(env); + } + + public void testNoConfigDirPreserved() throws Exception { + Files.createDirectories(env.pluginsFile().resolve("fake")); + final Path configDir = env.configFile().resolve("fake"); + final MockTerminal terminal = removePlugin("fake", home); + assertThat(terminal.getOutput(), not(containsString(expectedConfigDirPreservedMessage(configDir)))); + } + + private String expectedConfigDirPreservedMessage(final Path configDir) { + return "-> Preserving plugin config files [" + configDir + "] in case of upgrade, delete manually if not needed"; + } + } diff --git a/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/config/log4j2.properties b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/config/log4j2.properties new file mode 100644 index 00000000000..42fd3f35359 --- /dev/null +++ 
b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/config/log4j2.properties @@ -0,0 +1,33 @@ +status = error + +appender.console.type = Console +appender.console.name = console +appender.console.layout.type = PatternLayout +appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %m%n + +appender.file.type = File +appender.file.name = file +appender.file.fileName = ${sys:es.logs}.log +appender.file.layout.type = PatternLayout +appender.file.layout.pattern = [%p][%l] %m%n + +rootLogger.level = info +rootLogger.appenderRef.console.ref = console +rootLogger.appenderRef.file.ref = file + +logger.test.name = test +logger.test.level = trace +logger.test.appenderRef.console.ref = console +logger.test.appenderRef.file.ref = file +logger.test.additivity = false + +appender.deprecation_file.type = File +appender.deprecation_file.name = deprecation_file +appender.deprecation_file.fileName = ${sys:es.logs}_deprecation.log +appender.deprecation_file.layout.type = PatternLayout +appender.deprecation_file.layout.pattern = [%p][%l] %m%n + +logger.deprecation.name = deprecation +logger.deprecation.level = warn +logger.deprecation.appenderRef.deprecation_file.ref = deprecation_file +logger.deprecation.additivity = false diff --git a/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/config/logging.yml b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/config/logging.yml new file mode 100644 index 00000000000..5aa98f454f3 --- /dev/null +++ b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/config/logging.yml @@ -0,0 +1,2 @@ +logger.level: INFO +rootLogger: ${logger.level}, terminal diff --git a/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/config/second/log4j2.properties b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/config/second/log4j2.properties new file mode 100644 index 00000000000..2ade4c896c3 --- /dev/null +++ b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/config/second/log4j2.properties @@ -0,0 +1,8 @@ +appender.console2.type = Console +appender.console2.name = console2 +appender.console2.layout.type = PatternLayout +appender.console2.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %m%n + +logger.second.name = second +logger.second.level = debug +logger.second.appenderRef.console2.ref = console2 diff --git a/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/config/third/log4j2.properties b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/config/third/log4j2.properties new file mode 100644 index 00000000000..8699f574d53 --- /dev/null +++ b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/config/third/log4j2.properties @@ -0,0 +1,8 @@ +appender.console3.type = Console +appender.console3.name = console3 +appender.console3.layout.type = PatternLayout +appender.console3.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %m%n + +logger.third.name = third +logger.third.level = debug +logger.third.appenderRef.console3.ref = console3 diff --git a/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java b/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java index 3402e6ea2c8..d36c2aa04d2 100644 --- a/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java +++ b/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java @@ -19,11 +19,11 @@ package org.elasticsearch.smoketest; +import 
org.apache.logging.log4j.Logger; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.client.Client; import org.elasticsearch.client.transport.TransportClient; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; @@ -72,7 +72,7 @@ public abstract class ESSmokeClientTestCase extends LuceneTestCase { */ public static final String TESTS_CLUSTER = "tests.cluster"; - protected static final ESLogger logger = ESLoggerFactory.getLogger(ESSmokeClientTestCase.class.getName()); + protected static final Logger logger = ESLoggerFactory.getLogger(ESSmokeClientTestCase.class.getName()); private static final AtomicInteger counter = new AtomicInteger(); private static Client client; diff --git a/qa/vagrant/src/test/resources/packaging/scripts/30_deb_package.bats b/qa/vagrant/src/test/resources/packaging/scripts/30_deb_package.bats index 1f4cdeeeb3f..2fdeed4d13f 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/30_deb_package.bats +++ b/qa/vagrant/src/test/resources/packaging/scripts/30_deb_package.bats @@ -133,7 +133,7 @@ setup() { assert_file_exist "/etc/elasticsearch" assert_file_exist "/etc/elasticsearch/elasticsearch.yml" assert_file_exist "/etc/elasticsearch/jvm.options" - assert_file_exist "/etc/elasticsearch/logging.yml" + assert_file_exist "/etc/elasticsearch/log4j2.properties" # The env file is still here assert_file_exist "/etc/default/elasticsearch" @@ -154,7 +154,7 @@ setup() { assert_file_not_exist "/etc/elasticsearch" assert_file_not_exist "/etc/elasticsearch/elasticsearch.yml" assert_file_not_exist "/etc/elasticsearch/jvm.options" - assert_file_not_exist "/etc/elasticsearch/logging.yml" + assert_file_not_exist "/etc/elasticsearch/log4j2.properties" assert_file_not_exist "/etc/default/elasticsearch" diff --git a/qa/vagrant/src/test/resources/packaging/scripts/40_rpm_package.bats b/qa/vagrant/src/test/resources/packaging/scripts/40_rpm_package.bats index c47f24e6c67..a7aa860370c 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/40_rpm_package.bats +++ b/qa/vagrant/src/test/resources/packaging/scripts/40_rpm_package.bats @@ -121,7 +121,7 @@ setup() { assert_file_not_exist "/etc/elasticsearch" assert_file_not_exist "/etc/elasticsearch/elasticsearch.yml" assert_file_not_exist "/etc/elasticsearch/jvm.options" - assert_file_not_exist "/etc/elasticsearch/logging.yml" + assert_file_not_exist "/etc/elasticsearch/log4j2.properties" assert_file_not_exist "/etc/init.d/elasticsearch" assert_file_not_exist "/usr/lib/systemd/system/elasticsearch.service" @@ -140,7 +140,7 @@ setup() { @test "[RPM] reremove package" { echo "# ping" >> "/etc/elasticsearch/elasticsearch.yml" echo "# ping" >> "/etc/elasticsearch/jvm.options" - echo "# ping" >> "/etc/elasticsearch/logging.yml" + echo "# ping" >> "/etc/elasticsearch/log4j2.properties" echo "# ping" >> "/etc/elasticsearch/scripts/script" rpm -e 'elasticsearch' } @@ -163,8 +163,8 @@ setup() { assert_file_exist "/etc/elasticsearch/elasticsearch.yml.rpmsave" assert_file_not_exist "/etc/elasticsearch/jvm.options" assert_file_exist "/etc/elasticsearch/jvm.options.rpmsave" - assert_file_not_exist "/etc/elasticsearch/logging.yml" - assert_file_exist "/etc/elasticsearch/logging.yml.rpmsave" + assert_file_not_exist "/etc/elasticsearch/log4j2.properties" + assert_file_exist 
"/etc/elasticsearch/log4j2.properties.rpmsave" # older versions of rpm behave differently and preserve the # directory but do not append the ".rpmsave" suffix test -e "/etc/elasticsearch/scripts" || test -e "/etc/elasticsearch/scripts.rpmsave" diff --git a/qa/vagrant/src/test/resources/packaging/scripts/os_package.bash b/qa/vagrant/src/test/resources/packaging/scripts/os_package.bash index ee6e491d169..d46d7fe9a5d 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/os_package.bash +++ b/qa/vagrant/src/test/resources/packaging/scripts/os_package.bash @@ -79,7 +79,7 @@ verify_package_installation() { assert_file "$ESHOME/lib" d root root 755 assert_file "$ESCONFIG" d root elasticsearch 750 assert_file "$ESCONFIG/elasticsearch.yml" f root elasticsearch 750 - assert_file "$ESCONFIG/logging.yml" f root elasticsearch 750 + assert_file "$ESCONFIG/log4j2.properties" f root elasticsearch 750 assert_file "$ESSCRIPTS" d root elasticsearch 750 assert_file "$ESDATA" d elasticsearch elasticsearch 755 assert_file "$ESLOG" d elasticsearch elasticsearch 755 diff --git a/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash b/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash index dbe2633e770..fbda05d5f30 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash +++ b/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash @@ -500,7 +500,7 @@ move_config() { mv "$oldConfig"/* "$ESCONFIG" chown -R elasticsearch:elasticsearch "$ESCONFIG" assert_file_exist "$ESCONFIG/elasticsearch.yml" - assert_file_exist "$ESCONFIG/logging.yml" + assert_file_exist "$ESCONFIG/log4j2.properties" } # Copies a script into the Elasticsearch install. diff --git a/qa/vagrant/src/test/resources/packaging/scripts/tar.bash b/qa/vagrant/src/test/resources/packaging/scripts/tar.bash index 798ec6c2997..0ea86ddcc6e 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/tar.bash +++ b/qa/vagrant/src/test/resources/packaging/scripts/tar.bash @@ -86,7 +86,7 @@ verify_archive_installation() { assert_file "$ESHOME/bin/elasticsearch-plugin" f assert_file "$ESCONFIG" d assert_file "$ESCONFIG/elasticsearch.yml" f - assert_file "$ESCONFIG/logging.yml" f + assert_file "$ESCONFIG/log4j2.properties" f assert_file "$ESHOME/lib" d assert_file "$ESHOME/NOTICE.txt" f assert_file "$ESHOME/LICENSE.txt" f diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.stats/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.stats/10_basic.yaml index 08d13e04bea..ccbfc199cec 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.stats/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.stats/10_basic.yaml @@ -19,6 +19,11 @@ - gte: { nodes.count.ingest: 0} - gte: { nodes.count.coordinating_only: 0} - is_true: nodes.os + - is_true: nodes.os.mem.total_in_bytes + - is_true: nodes.os.mem.free_in_bytes + - is_true: nodes.os.mem.used_in_bytes + - is_true: nodes.os.mem.free_percent + - is_true: nodes.os.mem.used_percent - is_true: nodes.process - is_true: nodes.jvm - is_true: nodes.fs diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/15_without_id.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/15_without_id.yaml index 0342fdb019b..1126a3d085c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/15_without_id.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/15_without_id.yaml @@ -1,25 +1,8 @@ --- "Create 
without ID": - do: + catch: /Validation|Invalid/ create: index: test_1 type: test body: { foo: bar } - - - is_true: _id - - match: { _index: test_1 } - - match: { _type: test } - - match: { _version: 1 } - - set: { _id: id } - - - do: - get: - index: test_1 - type: test - id: '$id' - - - match: { _index: test_1 } - - match: { _type: test } - - match: { _id: $id } - - match: { _version: 1 } - - match: { _source: { foo: bar }} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yaml index 48857522cb8..c8f6871295e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yaml @@ -1,11 +1,22 @@ --- setup: + - do: + indices.create: + index: test + body: + mappings: + test: + properties: + bigint: + type: keyword + + - do: index: index: test_1 type: test id: 1 - body: { "include": { "field1": "v1", "field2": "v2" }, "count": 1 } + body: { "include": { "field1": "v1", "field2": "v2" }, "count": 1, "bigint": 72057594037927936 } - do: indices.refresh: {} @@ -90,6 +101,17 @@ setup: - match: { hits.hits.0._source.include.field1: v1 } - is_false: hits.hits.0._source.include.field2 +--- +"_source include on bigint": + - do: + search: + body: + _source: + includes: bigint + query: { match_all: {} } + - match: { hits.hits.0._source.bigint: 72057594037927936 } + - is_false: hits.hits.0._source.include.field2 + --- "fields in body": - do: diff --git a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java index fe624297e72..96921c3c90d 100644 --- a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java +++ b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java @@ -20,13 +20,12 @@ package org.elasticsearch.bootstrap; import com.carrotsearch.randomizedtesting.RandomizedRunner; -import org.apache.log4j.Java9Hack; -import org.apache.lucene.util.Constants; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.SecureSM; import org.elasticsearch.common.Strings; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.logging.LogConfigurator; import org.elasticsearch.plugins.PluginInfo; import org.junit.Assert; @@ -91,10 +90,6 @@ public class BootstrapForTesting { throw new RuntimeException("found jar hell in test classpath", e); } - if (Constants.JRE_IS_MINIMUM_JAVA9) { - Java9Hack.fixLog4j(); - } - // install security manager if requested if (systemPropertyAsBoolean("tests.security.manager", true)) { try { diff --git a/test/framework/src/main/java/org/elasticsearch/common/logging/TestLoggers.java b/test/framework/src/main/java/org/elasticsearch/common/logging/TestLoggers.java new file mode 100644 index 00000000000..3b6cf7e2c94 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/common/logging/TestLoggers.java @@ -0,0 +1,57 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.logging; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.Appender; +import org.apache.logging.log4j.core.LoggerContext; +import org.apache.logging.log4j.core.config.AppenderRef; +import org.apache.logging.log4j.core.config.Configuration; +import org.apache.logging.log4j.core.config.LoggerConfig; + +public class TestLoggers { + + public static void addAppender(final Logger logger, final Appender appender) { + final LoggerContext ctx = (LoggerContext) LogManager.getContext(false); + final Configuration config = ctx.getConfiguration(); + config.addAppender(appender); + LoggerConfig loggerConfig = config.getLoggerConfig(logger.getName()); + if (!logger.getName().equals(loggerConfig.getName())) { + loggerConfig = new LoggerConfig(logger.getName(), logger.getLevel(), true); + config.addLogger(logger.getName(), loggerConfig); + } + loggerConfig.addAppender(appender, null, null); + ctx.updateLoggers(); + } + + public static void removeAppender(final Logger logger, final Appender appender) { + final LoggerContext ctx = (LoggerContext) LogManager.getContext(false); + final Configuration config = ctx.getConfiguration(); + LoggerConfig loggerConfig = config.getLoggerConfig(logger.getName()); + if (!logger.getName().equals(loggerConfig.getName())) { + loggerConfig = new LoggerConfig(logger.getName(), logger.getLevel(), true); + config.addLogger(logger.getName(), loggerConfig); + } + loggerConfig.removeAppender(appender.getName()); + ctx.updateLoggers(); + } + +} diff --git a/test/framework/src/main/java/org/elasticsearch/index/store/EsBaseDirectoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/store/EsBaseDirectoryTestCase.java index 638d24e7f9c..69dfae2c678 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/store/EsBaseDirectoryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/store/EsBaseDirectoryTestCase.java @@ -21,12 +21,13 @@ package org.elasticsearch.index.store; import com.carrotsearch.randomizedtesting.annotations.Listeners; import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; +import org.apache.logging.log4j.Logger; import org.apache.lucene.store.BaseDirectoryTestCase; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TimeUnits; import org.elasticsearch.bootstrap.BootstrapForTesting; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter; /** @@ -40,9 +41,14 @@ import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter; @LuceneTestCase.SuppressSysoutChecks(bugUrl = "we log a lot on purpose") public abstract class EsBaseDirectoryTestCase extends BaseDirectoryTestCase { static { + try { + Class.forName("org.elasticsearch.test.ESTestCase"); + } catch (ClassNotFoundException e) { + throw new AssertionError(e); + } BootstrapForTesting.ensureInitialized(); } - protected final ESLogger logger = 
Loggers.getLogger(getClass()); + protected final Logger logger = Loggers.getLogger(getClass()); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java b/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java index 4440fbe117d..3c5f105e4d1 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java +++ b/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java @@ -20,19 +20,24 @@ package org.elasticsearch.test;/* import com.carrotsearch.randomizedtesting.RandomizedTest; import com.carrotsearch.randomizedtesting.generators.RandomInts; import com.carrotsearch.randomizedtesting.generators.RandomStrings; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.client.Client; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.junit.Assert; import java.io.IOException; import java.util.Random; +import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Semaphore; @@ -45,17 +50,18 @@ import static org.hamcrest.Matchers.equalTo; public class BackgroundIndexer implements AutoCloseable { - private final ESLogger logger = Loggers.getLogger(getClass()); + private final Logger logger = Loggers.getLogger(getClass()); final Thread[] writers; final CountDownLatch stopLatch; final CopyOnWriteArrayList failures; final AtomicBoolean stop = new AtomicBoolean(false); final AtomicLong idGenerator = new AtomicLong(); - final AtomicLong indexCounter = new AtomicLong(); final CountDownLatch startLatch = new CountDownLatch(1); final AtomicBoolean hasBudget = new AtomicBoolean(false); // when set to true, writers will acquire writes from a semaphore final Semaphore availableBudget = new Semaphore(0); + final boolean useAutoGeneratedIDs; + private final Set<String> ids = ConcurrentCollections.newConcurrentSet(); volatile int minFieldSize = 10; volatile int maxFieldSize = 140; @@ -116,6 +122,7 @@ public class BackgroundIndexer implements AutoCloseable { if (random == null) { random = RandomizedTest.getRandom(); } + useAutoGeneratedIDs = random.nextBoolean(); failures = new CopyOnWriteArrayList<>(); writers = new Thread[writerCount]; stopLatch = new CountDownLatch(writers.length); @@ -145,12 +152,17 @@ public class BackgroundIndexer implements AutoCloseable { BulkRequestBuilder bulkRequest = client.prepareBulk(); for (int i = 0; i < batchSize; i++) { id = idGenerator.incrementAndGet(); - bulkRequest.add(client.prepareIndex(index, type, Long.toString(id)).setSource(generateSource(id, threadRandom))); + if (useAutoGeneratedIDs) { + bulkRequest.add(client.prepareIndex(index, type).setSource(generateSource(id, threadRandom))); + } else { + bulkRequest.add(client.prepareIndex(index, type, Long.toString(id)).setSource(generateSource(id, threadRandom))); + } } BulkResponse bulkResponse = bulkRequest.get(); for (BulkItemResponse 
bulkItemResponse : bulkResponse) { if (!bulkItemResponse.isFailed()) { - indexCounter.incrementAndGet(); + boolean add = ids.add(bulkItemResponse.getId()); + assert add : "ID: " + bulkItemResponse.getId() + " already used"; } else { throw new ElasticsearchException("bulk request failure, id: [" + bulkItemResponse.getFailure().getId() + "] message: " + bulkItemResponse.getFailure().getMessage()); @@ -164,14 +176,24 @@ continue; } id = idGenerator.incrementAndGet(); - client.prepareIndex(index, type, Long.toString(id)).setSource(generateSource(id, threadRandom)).get(); - indexCounter.incrementAndGet(); + if (useAutoGeneratedIDs) { + IndexResponse indexResponse = client.prepareIndex(index, type).setSource(generateSource(id, threadRandom)).get(); + boolean add = ids.add(indexResponse.getId()); + assert add : "ID: " + indexResponse.getId() + " already used"; + } else { + IndexResponse indexResponse = client.prepareIndex(index, type, Long.toString(id)).setSource(generateSource(id, threadRandom)).get(); + boolean add = ids.add(indexResponse.getId()); + assert add : "ID: " + indexResponse.getId() + " already used"; + } } } - logger.info("**** done indexing thread {} stop: {} numDocsIndexed: {}", indexerId, stop.get(), indexCounter.get()); + logger.info("**** done indexing thread {} stop: {} numDocsIndexed: {}", indexerId, stop.get(), ids.size()); } catch (Exception e) { failures.add(e); - logger.warn("**** failed indexing thread {} on doc id {}", e, indexerId, id); + final long docId = id; + logger.warn( + (Supplier<?>) + () -> new ParameterizedMessage("**** failed indexing thread {} on doc id {}", indexerId, docId), e); } finally { stopLatch.countDown(); } @@ -259,7 +281,7 @@ } public long totalIndexedDocs() { - return indexCounter.get(); + return ids.size(); } public Throwable[] getFailures() { @@ -284,4 +306,11 @@ public void close() throws Exception { stop(); } + + /** + * Returns the ID set of all documents indexed by this indexer run + */ + public Set<String> getIds() { + return this.ids; + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/CorruptionUtils.java b/test/framework/src/main/java/org/elasticsearch/test/CorruptionUtils.java index 916adc142c8..df306dfc9e3 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/CorruptionUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/CorruptionUtils.java @@ -19,13 +19,13 @@ package org.elasticsearch.test; import com.carrotsearch.randomizedtesting.generators.RandomPicks; +import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.store.ChecksumIndexInput; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FSDirectory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.ESLoggerFactory; import java.io.IOException; @@ -44,7 +44,7 @@ import static org.junit.Assert.assertTrue; public final class CorruptionUtils { - private static ESLogger logger = ESLoggerFactory.getLogger("test"); + private static Logger logger = ESLoggerFactory.getLogger("test"); private CorruptionUtils() {} /** diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index 
784a0a4bede..adc3e1ec2d2 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -29,7 +29,7 @@ import com.carrotsearch.randomizedtesting.generators.RandomInts; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.carrotsearch.randomizedtesting.generators.RandomStrings; import com.carrotsearch.randomizedtesting.rules.TestRuleAdapter; - +import org.apache.logging.log4j.Logger; import org.apache.lucene.uninverting.UninvertingReader; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; @@ -43,8 +43,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.io.PathUtilsForTesting; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.MockBigArrays; @@ -63,8 +61,8 @@ import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.analysis.AnalysisModule; -import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.AnalysisPlugin; +import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.script.MockScriptEngine; import org.elasticsearch.script.ScriptModule; import org.elasticsearch.script.ScriptService; @@ -130,10 +128,17 @@ import static org.hamcrest.Matchers.equalTo; public abstract class ESTestCase extends LuceneTestCase { static { + System.setProperty("log4j.shutdownHookEnabled", "false"); + // we can not shutdown logging when tests are running or the next test that runs within the + // same JVM will try to initialize logging after a security manager has been installed and + // this will fail + System.setProperty("es.log4j.shutdownEnabled", "false"); + System.setProperty("log4j2.disable.jmx", "true"); + System.setProperty("log4j.skipJansi", "true"); // jython has this crazy shaded Jansi version that log4j2 tries to load BootstrapForTesting.ensureInitialized(); } - protected final ESLogger logger = Loggers.getLogger(getClass()); + protected final Logger logger = Loggers.getLogger(getClass()); // ----------------------------------------------------------------- // Suite and test case setup/cleanup. 
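[Reviewer note] The ESLogger -> Logger swap in the hunk above, together with the Supplier-based warn calls elsewhere in this patch, follows a single idiom; here is a minimal, self-contained sketch of it (the class name Example and method onDeleteFailure are illustrative, not part of the patch). Log4j 2's Logger has no warn(String, Throwable, Object...) overload like ESLogger did, so call sites that log a Throwable alongside a parameterized message pass a message Supplier, with the Throwable as the last argument:

    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.message.ParameterizedMessage;
    import org.apache.logging.log4j.util.Supplier;
    import org.elasticsearch.common.logging.Loggers;

    class Example {
        private final Logger logger = Loggers.getLogger(Example.class);

        void onDeleteFailure(String bucketName, String region, Exception ex) {
            // the lambda defers message construction: the ParameterizedMessage is
            // only built if WARN is enabled, and the Throwable is passed
            // separately instead of being mixed into the format parameters
            logger.warn(
                (Supplier<?>) () -> new ParameterizedMessage(
                    "Failed to delete S3 repository [{}] in [{}]", bucketName, region), ex);
        }
    }

This is why calls of the old form logger.warn("...[{}]...", ex, arg) are rewritten throughout the patch: with Log4j 2 the exception must come last, after the message or message Supplier.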
@@ -298,6 +303,14 @@ public abstract class ESTestCase extends LuceneTestCase { return random().nextInt(); } + public static long randomPositiveLong() { + long randomLong; + do { + randomLong = randomLong(); + } while (randomLong == Long.MIN_VALUE); + return Math.abs(randomLong); + } + public static float randomFloat() { return random().nextFloat(); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTokenStreamTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTokenStreamTestCase.java index 1af9fa5ba7e..c4bd9643657 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTokenStreamTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTokenStreamTestCase.java @@ -43,6 +43,11 @@ import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter; public abstract class ESTokenStreamTestCase extends BaseTokenStreamTestCase { static { + try { + Class.forName("org.elasticsearch.test.ESTestCase"); + } catch (ClassNotFoundException e) { + throw new AssertionError(e); + } BootstrapForTesting.ensureInitialized(); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ExternalNode.java b/test/framework/src/main/java/org/elasticsearch/test/ExternalNode.java index cde4e5f6ac9..8725ed815ad 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ExternalNode.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ExternalNode.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.test; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.Constants; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; @@ -27,7 +28,6 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.PathUtils; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; @@ -62,7 +62,7 @@ final class ExternalNode implements Closeable { private final String clusterName; private TransportClient client; - private final ESLogger logger = Loggers.getLogger(getClass()); + private final Logger logger = Loggers.getLogger(getClass()); private Settings externalNodeSettings; diff --git a/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java index 6f6ac8488d1..adab3b70455 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java @@ -19,6 +19,7 @@ package org.elasticsearch.test; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; @@ -27,7 +28,6 @@ import org.elasticsearch.client.Client; import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.breaker.CircuitBreaker; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; @@ -56,7 +56,7 @@ import static 
org.junit.Assert.assertThat; */ public final class ExternalTestCluster extends TestCluster { - private static final ESLogger logger = Loggers.getLogger(ExternalTestCluster.class); + private static final Logger logger = Loggers.getLogger(ExternalTestCluster.class); private static final AtomicInteger counter = new AtomicInteger(); public static final String EXTERNAL_CLUSTER_PREFIX = "external_"; diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 1dd1c5d9b64..8a3274ec727 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -24,6 +24,7 @@ import com.carrotsearch.randomizedtesting.SysGlobals; import com.carrotsearch.randomizedtesting.generators.RandomInts; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.carrotsearch.randomizedtesting.generators.RandomStrings; +import org.apache.logging.log4j.Logger; import org.apache.lucene.store.StoreRateLimiting; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; @@ -51,7 +52,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; @@ -147,7 +147,7 @@ import static org.junit.Assert.fail; */ public final class InternalTestCluster extends TestCluster { - private final ESLogger logger = Loggers.getLogger(getClass()); + private final Logger logger = Loggers.getLogger(getClass()); /** * The number of ports in the range used for this JVM diff --git a/test/framework/src/main/java/org/elasticsearch/test/OldIndexUtils.java b/test/framework/src/main/java/org/elasticsearch/test/OldIndexUtils.java index e1967256ddb..fe46251e3ee 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/OldIndexUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/OldIndexUtils.java @@ -19,6 +19,7 @@ package org.elasticsearch.test; +import org.apache.logging.log4j.Logger; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.util.TestUtil; import org.elasticsearch.Version; @@ -31,7 +32,6 @@ import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusResponse; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.elasticsearch.common.io.FileSystemUtils; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.IndexFolderUpgrader; @@ -61,7 +61,7 @@ import static org.junit.Assert.assertEquals; public class OldIndexUtils { - public static List loadIndexesList(String prefix, Path bwcIndicesPath) throws IOException { + public static List loadDataFilesList(String prefix, Path bwcIndicesPath) throws IOException { List indexes = new ArrayList<>(); try (DirectoryStream stream = Files.newDirectoryStream(bwcIndicesPath, prefix + "-*.zip")) { for (Path path : stream) { @@ -86,7 +86,7 @@ public class OldIndexUtils { IndexFolderUpgrader.upgradeIndicesIfNeeded(Settings.EMPTY, 
nodeEnvironment); } - public static void loadIndex(String indexName, String indexFile, Path unzipDir, Path bwcPath, ESLogger logger, Path... paths) throws + public static void loadIndex(String indexName, String indexFile, Path unzipDir, Path bwcPath, Logger logger, Path... paths) throws Exception { Path unzipDataDir = unzipDir.resolve("data"); @@ -128,7 +128,7 @@ public class OldIndexUtils { } // randomly distribute the files from src over dests paths - public static void copyIndex(final ESLogger logger, final Path src, final String indexName, final Path... dests) throws IOException { + public static void copyIndex(final Logger logger, final Path src, final String indexName, final Path... dests) throws IOException { Path destinationDataPath = dests[randomInt(dests.length - 1)]; for (Path dest : dests) { Path indexDir = dest.resolve(indexName); @@ -194,7 +194,7 @@ public class OldIndexUtils { } public static boolean isUpgraded(Client client, String index) throws Exception { - ESLogger logger = Loggers.getLogger(OldIndexUtils.class); + Logger logger = Loggers.getLogger(OldIndexUtils.class); int toUpgrade = 0; for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) { logger.info("Index: {}, total: {}, toUpgrade: {}", status.getIndex(), status.getTotalBytes(), status.getToUpgradeBytes()); diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java index 2629f655c95..124960fe921 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java @@ -20,12 +20,12 @@ package org.elasticsearch.test; import com.carrotsearch.hppc.ObjectArrayList; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.indices.IndexTemplateMissingException; @@ -45,7 +45,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcke */ public abstract class TestCluster implements Closeable { - protected final ESLogger logger = Loggers.getLogger(getClass()); + protected final Logger logger = Loggers.getLogger(getClass()); private final long seed; protected Random random; diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruption.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruption.java index 88748917fb9..4e135c4c2b0 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruption.java +++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruption.java @@ -20,7 +20,7 @@ package org.elasticsearch.test.disruption; import com.carrotsearch.randomizedtesting.generators.RandomPicks; -import org.elasticsearch.common.logging.ESLogger; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.set.Sets; @@ -45,7 +45,7 @@ import static org.junit.Assert.assertFalse; */ public class NetworkDisruption 
implements ServiceDisruptionScheme { - private final ESLogger logger = Loggers.getLogger(NetworkDisruption.class); + private final Logger logger = Loggers.getLogger(NetworkDisruption.class); private final DisruptedLinks disruptedLinks; private final NetworkLinkDisruptionType networkLinkDisruptionType; diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java index 104fa76e597..862e18d7aca 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java +++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.test.disruption; -import org.elasticsearch.common.logging.ESLogger; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.test.InternalTestCluster; @@ -28,7 +28,7 @@ import static org.junit.Assert.assertFalse; public abstract class SingleNodeDisruption implements ServiceDisruptionScheme { - protected final ESLogger logger = Loggers.getLogger(getClass()); + protected final Logger logger = Loggers.getLogger(getClass()); protected volatile String disruptedNode; protected volatile InternalTestCluster cluster; diff --git a/test/framework/src/main/java/org/elasticsearch/test/engine/AssertingSearcher.java b/test/framework/src/main/java/org/elasticsearch/test/engine/AssertingSearcher.java index a0f027bcbd8..d977a215434 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/engine/AssertingSearcher.java +++ b/test/framework/src/main/java/org/elasticsearch/test/engine/AssertingSearcher.java @@ -19,8 +19,8 @@ package org.elasticsearch.test.engine; +import org.apache.logging.log4j.Logger; import org.apache.lucene.search.IndexSearcher; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.ShardId; @@ -35,12 +35,12 @@ class AssertingSearcher extends Engine.Searcher { private RuntimeException firstReleaseStack; private final Object lock = new Object(); private final int initialRefCount; - private final ESLogger logger; + private final Logger logger; private final AtomicBoolean closed = new AtomicBoolean(false); AssertingSearcher(IndexSearcher indexSearcher, final Engine.Searcher wrappedSearcher, ShardId shardId, - ESLogger logger) { + Logger logger) { super(wrappedSearcher.source(), indexSearcher); // we only use the given index searcher here instead of the IS of the wrapped searcher. the IS might be a wrapped searcher // with a wrapped reader. 
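[Reviewer note] The new TestLoggers helper added earlier in this patch (addAppender/removeAppender) is how tests can hook into the live Log4j 2 configuration. A hypothetical usage sketch, assuming the Log4j 2 core API; CapturingAppender is illustrative and not part of the change:

    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.core.LogEvent;
    import org.apache.logging.log4j.core.appender.AbstractAppender;
    import org.elasticsearch.common.logging.Loggers;
    import org.elasticsearch.common.logging.TestLoggers;

    import java.util.ArrayList;
    import java.util.List;

    class CapturingAppender extends AbstractAppender {
        final List<String> messages = new ArrayList<>();

        CapturingAppender() {
            super("capturing", null, null); // name, no filter, no layout (we never format)
        }

        @Override
        public void append(final LogEvent event) {
            messages.add(event.getMessage().getFormattedMessage());
        }
    }

    // in a test:
    Logger logger = Loggers.getLogger("org.elasticsearch.test");
    CapturingAppender appender = new CapturingAppender();
    appender.start(); // Log4j 2 appenders must be started before they accept events
    TestLoggers.addAppender(logger, appender);
    try {
        logger.warn("boom");
        // assert on appender.messages here
    } finally {
        TestLoggers.removeAppender(logger, appender);
    }

TestLoggers.addAppender registers the appender on the logger's LoggerConfig, creating a dedicated config when the logger only inherited one, and removeAppender undoes that; pairing the two in try/finally keeps the shared LoggerContext clean for subsequent tests.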
diff --git a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java index 304e3047496..fbc4352b1e2 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.test.engine; +import org.apache.logging.log4j.Logger; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.FilterDirectoryReader; import org.apache.lucene.index.IndexReader; @@ -28,7 +29,6 @@ import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.SearcherManager; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -66,7 +66,7 @@ public final class MockEngineSupport { private final AtomicBoolean closing = new AtomicBoolean(false); - private final ESLogger logger = Loggers.getLogger(Engine.class); + private final Logger logger = Loggers.getLogger(Engine.class); private final ShardId shardId; private final QueryCache filterCache; private final QueryCachingPolicy filterCachingPolicy; diff --git a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java index 99d70fa604e..0009c21d6aa 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java +++ b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.test.junit.listeners; -import org.elasticsearch.common.logging.ESLogger; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.test.junit.annotations.TestLogging; @@ -69,7 +69,7 @@ public class LoggingListener extends RunListener { previousLoggingMap = reset(previousLoggingMap); } - private static ESLogger resolveLogger(String loggerName) { + private static Logger resolveLogger(String loggerName) { if (loggerName.equalsIgnoreCase("_root")) { return ESLoggerFactory.getRootLogger(); } @@ -83,9 +83,9 @@ } Map<String, String> previousValues = new HashMap<>(); for (Map.Entry<String, String> entry : map.entrySet()) { - ESLogger esLogger = resolveLogger(entry.getKey()); - previousValues.put(entry.getKey(), esLogger.getLevel()); - esLogger.setLevel(entry.getValue()); + Logger logger = resolveLogger(entry.getKey()); + previousValues.put(entry.getKey(), logger.getLevel().toString()); + Loggers.setLevel(logger, entry.getValue()); } return previousValues; } @@ -110,8 +110,8 @@ private Map<String, String> reset(Map<String, String> map) { if (map != null) { for (Map.Entry<String, String> previousLogger : map.entrySet()) { - ESLogger esLogger = resolveLogger(previousLogger.getKey()); - esLogger.setLevel(previousLogger.getValue()); + Logger logger = resolveLogger(previousLogger.getKey()); + Loggers.setLevel(logger, previousLogger.getValue()); } } return null; diff --git a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java 
b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java index 92433649553..93ad8bb1e9f 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java +++ b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java @@ -19,8 +19,8 @@ package org.elasticsearch.test.junit.listeners; import com.carrotsearch.randomizedtesting.ReproduceErrorMessageBuilder; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESTestCase; @@ -47,7 +47,7 @@ import static org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase.REST_TE */ public class ReproduceInfoPrinter extends RunListener { - protected final ESLogger logger = Loggers.getLogger(ESTestCase.class); + protected final Logger logger = Loggers.getLogger(ESTestCase.class); @Override public void testStarted(Description description) throws Exception { diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java index 41ae7d8c04f..8040c421dce 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java @@ -19,19 +19,18 @@ package org.elasticsearch.test.rest.yaml; import com.carrotsearch.randomizedtesting.RandomizedTest; - import org.apache.http.Header; import org.apache.http.HttpEntity; import org.apache.http.HttpHost; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; import org.apache.http.message.BasicHeader; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestApi; @@ -55,7 +54,7 @@ import java.util.Set; * REST calls. 
*/ public class ClientYamlTestClient { - private static final ESLogger logger = Loggers.getLogger(ClientYamlTestClient.class); + private static final Logger logger = Loggers.getLogger(ClientYamlTestClient.class); //query_string params that don't need to be declared in the spec, they are supported by default private static final Set ALWAYS_ACCEPTED_QUERY_STRING_PARAMS = Sets.newHashSet("pretty", "source", "filter_path"); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java index 43feb238cc7..2f1e42c12cb 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java @@ -19,9 +19,9 @@ package org.elasticsearch.test.rest.yaml; import org.apache.http.HttpHost; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.client.RestClient; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestSpec; @@ -39,7 +39,7 @@ import java.util.Map; */ public class ClientYamlTestExecutionContext { - private static final ESLogger logger = Loggers.getLogger(ClientYamlTestExecutionContext.class); + private static final Logger logger = Loggers.getLogger(ClientYamlTestExecutionContext.class); private final Stash stash = new Stash(); @@ -75,8 +75,10 @@ public class ClientYamlTestExecutionContext { response = e.getRestTestResponse(); throw e; } finally { + // if we hit a bad exception the response is null + Object repsponseBody = response != null ? 
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Stash.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Stash.java index dff1e59762e..d9a4d957a25 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Stash.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Stash.java @@ -19,8 +19,8 @@ package org.elasticsearch.test.rest.yaml; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -39,7 +39,7 @@ import java.util.regex.Pattern; public class Stash implements ToXContent { private static final Pattern EXTENDED_KEY = Pattern.compile("\\$\\{([^}]+)\\}"); - private static final ESLogger logger = Loggers.getLogger(Stash.class); + private static final Logger logger = Loggers.getLogger(Stash.class); public static final Stash EMPTY = new Stash(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java index af4a8e4f51a..e233e9fab80 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java @@ -18,9 +18,9 @@ */ package org.elasticsearch.test.rest.yaml.section; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext; @@ -66,7 +66,7 @@ import static org.junit.Assert.fail; */ public class DoSection implements ExecutableSection { - private static final ESLogger logger = Loggers.getLogger(DoSection.class); + private static final Logger logger = Loggers.getLogger(DoSection.class); private final XContentLocation location; private String catchParam; diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/GreaterThanAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/GreaterThanAssertion.java index 1e8f38e7a44..b531f180fd6 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/GreaterThanAssertion.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/GreaterThanAssertion.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.test.rest.yaml.section; -import org.elasticsearch.common.logging.ESLogger; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentLocation; @@ -34,7 +34,7 @@ import static org.junit.Assert.fail; */ public class GreaterThanAssertion extends Assertion { - private static final ESLogger logger = Loggers.getLogger(GreaterThanAssertion.class); + private static final Logger logger = Loggers.getLogger(GreaterThanAssertion.class); public GreaterThanAssertion(XContentLocation location, String field, Object expectedValue) { super(location, field, expectedValue); diff --git 
a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java index 9c9936592c9..14b1a08a879 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java @@ -19,7 +19,7 @@ package org.elasticsearch.test.rest.yaml.section; -import org.elasticsearch.common.logging.ESLogger; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentLocation; @@ -35,7 +35,7 @@ import static org.junit.Assert.fail; */ public class GreaterThanEqualToAssertion extends Assertion { - private static final ESLogger logger = Loggers.getLogger(GreaterThanEqualToAssertion.class); + private static final Logger logger = Loggers.getLogger(GreaterThanEqualToAssertion.class); public GreaterThanEqualToAssertion(XContentLocation location, String field, Object expectedValue) { super(location, field, expectedValue); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/IsFalseAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/IsFalseAssertion.java index f679691c9ca..a356182ab46 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/IsFalseAssertion.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/IsFalseAssertion.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.test.rest.yaml.section; -import org.elasticsearch.common.logging.ESLogger; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentLocation; @@ -35,7 +35,7 @@ import static org.junit.Assert.assertThat; */ public class IsFalseAssertion extends Assertion { - private static final ESLogger logger = Loggers.getLogger(IsFalseAssertion.class); + private static final Logger logger = Loggers.getLogger(IsFalseAssertion.class); public IsFalseAssertion(XContentLocation location, String field) { super(location, field, false); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/IsTrueAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/IsTrueAssertion.java index 12bd8a34ed6..76ca0de70d9 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/IsTrueAssertion.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/IsTrueAssertion.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.test.rest.yaml.section; -import org.elasticsearch.common.logging.ESLogger; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentLocation; @@ -36,7 +36,7 @@ import static org.junit.Assert.assertThat; */ public class IsTrueAssertion extends Assertion { - private static final ESLogger logger = Loggers.getLogger(IsTrueAssertion.class); + private static final Logger logger = Loggers.getLogger(IsTrueAssertion.class); public IsTrueAssertion(XContentLocation location, String field) { super(location, field, true); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LengthAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LengthAssertion.java index 
5fd111733d2..062b9ecd87f 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LengthAssertion.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LengthAssertion.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.test.rest.yaml.section; -import org.elasticsearch.common.logging.ESLogger; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentLocation; @@ -36,7 +36,7 @@ import static org.junit.Assert.assertThat; */ public class LengthAssertion extends Assertion { - private static final ESLogger logger = Loggers.getLogger(LengthAssertion.class); + private static final Logger logger = Loggers.getLogger(LengthAssertion.class); public LengthAssertion(XContentLocation location, String field, Object expectedValue) { super(location, field, expectedValue); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LessThanAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LessThanAssertion.java index 2e9ab744480..591bd83fa62 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LessThanAssertion.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LessThanAssertion.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.test.rest.yaml.section; -import org.elasticsearch.common.logging.ESLogger; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentLocation; @@ -35,7 +35,7 @@ import static org.junit.Assert.fail; */ public class LessThanAssertion extends Assertion { - private static final ESLogger logger = Loggers.getLogger(LessThanAssertion.class); + private static final Logger logger = Loggers.getLogger(LessThanAssertion.class); public LessThanAssertion(XContentLocation location, String field, Object expectedValue) { super(location, field, expectedValue); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java index 46e25332b60..7c5710f689d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java @@ -19,7 +19,7 @@ package org.elasticsearch.test.rest.yaml.section; -import org.elasticsearch.common.logging.ESLogger; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentLocation; @@ -35,7 +35,7 @@ import static org.junit.Assert.fail; */ public class LessThanOrEqualToAssertion extends Assertion { - private static final ESLogger logger = Loggers.getLogger(LessThanOrEqualToAssertion.class); + private static final Logger logger = Loggers.getLogger(LessThanOrEqualToAssertion.class); public LessThanOrEqualToAssertion(XContentLocation location, String field, Object expectedValue) { super(location, field, expectedValue); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/MatchAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/MatchAssertion.java index c2a52bf735c..2bfb94e6584 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/MatchAssertion.java +++ 
b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/MatchAssertion.java @@ -18,8 +18,8 @@ */ package org.elasticsearch.test.rest.yaml.section; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentLocation; @@ -44,7 +44,7 @@ import static org.junit.Assert.assertThat; */ public class MatchAssertion extends Assertion { - private static final ESLogger logger = Loggers.getLogger(MatchAssertion.class); + private static final Logger logger = Loggers.getLogger(MatchAssertion.class); public MatchAssertion(XContentLocation location, String field, Object expectedValue) { super(location, field, expectedValue); diff --git a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java index 1e84ca0fe71..057e7c48456 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java @@ -21,7 +21,7 @@ package org.elasticsearch.test.store; import com.carrotsearch.randomizedtesting.SeedUtils; import com.carrotsearch.randomizedtesting.generators.RandomPicks; - +import org.apache.logging.log4j.Logger; import org.apache.lucene.index.CheckIndex; import org.apache.lucene.store.BaseDirectoryWrapper; import org.apache.lucene.store.Directory; @@ -34,7 +34,6 @@ import org.apache.lucene.util.TestRuleMarkFailure; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -113,7 +112,7 @@ public class MockFSDirectoryService extends FsDirectoryService { throw new UnsupportedOperationException(); } - public static void checkIndex(ESLogger logger, Store store, ShardId shardId) { + public static void checkIndex(Logger logger, Store store, ShardId shardId) { if (store.tryIncRef()) { logger.info("start check index"); try { diff --git a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java index 57d9fe3ff05..70c8d2be113 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java +++ b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java @@ -19,13 +19,12 @@ package org.elasticsearch.test.store; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.IndexEventListener; @@ -98,7 +97,7 @@ public class MockFSIndexStore extends IndexStore { if (indexShard != null) { Boolean remove = shardSet.remove(indexShard); if (remove == Boolean.TRUE) { - ESLogger logger = 
Loggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId()); + Logger logger = Loggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId()); MockFSDirectoryService.checkIndex(logger, indexShard.store(), indexShard.shardId()); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java b/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java index ec695e8bd41..b07e7315b88 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java +++ b/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java @@ -19,6 +19,8 @@ package org.elasticsearch.test.tasks; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -51,7 +53,11 @@ public class MockTaskManager extends TaskManager { try { listener.onTaskRegistered(task); } catch (Exception e) { - logger.warn("failed to notify task manager listener about unregistering the task with id {}", e, task.getId()); + logger.warn( + (Supplier) () -> new ParameterizedMessage( + "failed to notify task manager listener about registering the task with id {}", + task.getId()), + e); } } } @@ -66,7 +72,9 @@ public class MockTaskManager extends TaskManager { try { listener.onTaskUnregistered(task); } catch (Exception e) { - logger.warn("failed to notify task manager listener about unregistering the task with id {}", e, task.getId()); + logger.warn( + (Supplier) () -> new ParameterizedMessage( + "failed to notify task manager listener about unregistering the task with id {}", task.getId()), e); } } } else { @@ -81,7 +89,11 @@ public class MockTaskManager extends TaskManager { try { listener.waitForTaskCompletion(task); } catch (Exception e) { - logger.warn("failed to notify task manager listener about waitForTaskCompletion the task with id {}", e, task.getId()); + logger.warn( + (Supplier) () -> new ParameterizedMessage( + "failed to notify task manager listener about waitForTaskCompletion the task with id {}", + task.getId()), + e); } } super.waitForTaskCompletion(task, untilInNanos);
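The three warn calls above illustrate the API difference driving most of this patch: ESLogger accepted the Throwable between the format string and its arguments, while the log4j2 Logger has no such overload, so the message is wrapped in a lazily evaluated Supplier of ParameterizedMessage and the exception moves to the final parameter. A self-contained sketch of the same call shape (logger name and task id are illustrative):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

public class SupplierLoggingSketch {
    private static final Logger logger = LogManager.getLogger(SupplierLoggingSketch.class);

    public static void main(String[] args) {
        long taskId = 42L;
        Exception e = new IllegalStateException("listener blew up");
        // the Supplier defers building the ParameterizedMessage until log4j has
        // checked that WARN is enabled; the Throwable is passed as the last argument
        logger.warn(
            (Supplier<?>) () -> new ParameterizedMessage(
                "failed to notify task manager listener about registering the task with id {}", taskId),
            e);
    }
}

The cast to Supplier picks the warn(Supplier, Throwable) overload over warn(Object, Throwable), which is why it appears on every converted call site in the patch.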
diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 33c5fcccad1..c65e885a9bc 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -19,6 +19,8 @@ package org.elasticsearch.transport; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListenerResponseHandler; @@ -535,7 +537,8 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { listener.actionGet(); } catch (Exception e) { - logger.trace("caught exception while sending to node {}", e, nodeA); + logger.trace( + (Supplier) () -> new ParameterizedMessage("caught exception while sending to node {}", nodeA), e); } } } @@ -570,7 +573,8 @@ } catch (ConnectTransportException e) { // ok! } catch (Exception e) { - logger.error("caught exception while sending to node {}", e, node); + logger.error( + (Supplier) () -> new ParameterizedMessage("caught exception while sending to node {}", node), e); sendingErrors.add(e); } } @@ -1684,7 +1688,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { @Override public void handleException(TransportException exp) { - logger.debug("---> received exception for id {}", exp, id); + logger.debug((Supplier) () -> new ParameterizedMessage("---> received exception for id {}", id), exp); allRequestsDone.countDown(); Throwable unwrap = ExceptionsHelper.unwrap(exp, IOException.class); assertNotNull(unwrap); diff --git a/test/framework/src/main/resources/log4j.properties b/test/framework/src/main/resources/log4j.properties deleted file mode 100644 index 87d4560f72f..00000000000 --- a/test/framework/src/main/resources/log4j.properties +++ /dev/null @@ -1,9 +0,0 @@ -tests.es.logger.level=INFO -log4j.rootLogger=${tests.es.logger.level}, out - -log4j.logger.org.apache.http=INFO, out -log4j.additivity.org.apache.http=false - -log4j.appender.out=org.apache.log4j.ConsoleAppender -log4j.appender.out.layout=org.apache.log4j.PatternLayout -log4j.appender.out.layout.conversionPattern=[%d{ISO8601}][%-5p][%-25c] %m%n diff --git a/test/framework/src/main/resources/log4j2-test.properties b/test/framework/src/main/resources/log4j2-test.properties new file mode 100644 index 00000000000..9cfe3e326aa --- /dev/null +++ b/test/framework/src/main/resources/log4j2-test.properties @@ -0,0 +1,9 @@ +status = error + +appender.console.type = Console +appender.console.name = console +appender.console.layout.type = PatternLayout +appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %m%n + +rootLogger.level = info +rootLogger.appenderRef.console.ref = console diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java index c3a107e39fe..2d428202741 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java @@ -19,7 +19,8 @@ package org.elasticsearch.test.test; -import org.elasticsearch.common.logging.ESLogger; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.junit.annotations.TestLogging; @@ -30,7 +31,6 @@ import org.junit.runner.Result; import java.lang.reflect.Method; import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.nullValue; public class LoggingListenerTests extends ESTestCase { @@ -47,29 +47,29 @@ Description suiteDescription = Description.createSuiteDescription(TestClass.class); - ESLogger abcLogger = Loggers.getLogger("abc"); - ESLogger xyzLogger = Loggers.getLogger("xyz"); + Logger xyzLogger = Loggers.getLogger("xyz"); + Logger abcLogger = Loggers.getLogger("abc"); - assertThat(abcLogger.getLevel(), nullValue()); - assertThat(xyzLogger.getLevel(), nullValue()); + assertThat(xyzLogger.getLevel(), equalTo(Level.INFO)); + assertThat(abcLogger.getLevel(), equalTo(Level.INFO)); loggingListener.testRunStarted(suiteDescription); - assertThat(xyzLogger.getLevel(), nullValue()); - 
assertThat(abcLogger.getLevel(), nullValue()); + assertThat(xyzLogger.getLevel(), equalTo(Level.INFO)); + assertThat(abcLogger.getLevel(), equalTo(Level.INFO)); Method method = TestClass.class.getMethod("annotatedTestMethod"); TestLogging annotation = method.getAnnotation(TestLogging.class); Description testDescription = Description.createTestDescription(LoggingListenerTests.class, "annotatedTestMethod", annotation); loggingListener.testStarted(testDescription); - assertThat(xyzLogger.getLevel(), equalTo("TRACE")); - assertThat(abcLogger.getLevel(), nullValue()); + assertThat(xyzLogger.getLevel(), equalTo(Level.TRACE)); + assertThat(abcLogger.getLevel(), equalTo(Level.INFO)); loggingListener.testFinished(testDescription); - assertThat(xyzLogger.getLevel(), nullValue()); - assertThat(abcLogger.getLevel(), nullValue()); + assertThat(xyzLogger.getLevel(), equalTo(Level.INFO)); + assertThat(abcLogger.getLevel(), equalTo(Level.INFO)); loggingListener.testRunFinished(new Result()); - assertThat(xyzLogger.getLevel(), nullValue()); - assertThat(abcLogger.getLevel(), nullValue()); + assertThat(xyzLogger.getLevel(), equalTo(Level.INFO)); + assertThat(abcLogger.getLevel(), equalTo(Level.INFO)); } public void testCustomLevelPerClass() throws Exception { @@ -77,27 +78,27 @@ public class LoggingListenerTests extends ESTestCase { Description suiteDescription = Description.createSuiteDescription(AnnotatedTestClass.class); - ESLogger abcLogger = Loggers.getLogger("abc"); - ESLogger xyzLogger = Loggers.getLogger("xyz"); + Logger abcLogger = Loggers.getLogger("abc"); + Logger xyzLogger = Loggers.getLogger("xyz"); - assertThat(xyzLogger.getLevel(), nullValue()); - assertThat(abcLogger.getLevel(), nullValue()); + assertThat(xyzLogger.getLevel(), equalTo(Level.INFO)); + assertThat(abcLogger.getLevel(), equalTo(Level.INFO)); loggingListener.testRunStarted(suiteDescription); - assertThat(abcLogger.getLevel(), equalTo("ERROR")); - assertThat(xyzLogger.getLevel(), nullValue()); + assertThat(xyzLogger.getLevel(), equalTo(Level.INFO)); + assertThat(abcLogger.getLevel(), equalTo(Level.WARN)); Description testDescription = Description.createTestDescription(LoggingListenerTests.class, "test"); loggingListener.testStarted(testDescription); - assertThat(abcLogger.getLevel(), equalTo("ERROR")); - assertThat(xyzLogger.getLevel(), nullValue()); + assertThat(xyzLogger.getLevel(), equalTo(Level.INFO)); + assertThat(abcLogger.getLevel(), equalTo(Level.WARN)); loggingListener.testFinished(testDescription); - assertThat(abcLogger.getLevel(), equalTo("ERROR")); - assertThat(xyzLogger.getLevel(), nullValue()); + assertThat(xyzLogger.getLevel(), equalTo(Level.INFO)); + assertThat(abcLogger.getLevel(), equalTo(Level.WARN)); loggingListener.testRunFinished(new Result()); - assertThat(abcLogger.getLevel(), nullValue()); - assertThat(xyzLogger.getLevel(), nullValue()); + assertThat(xyzLogger.getLevel(), equalTo(Level.INFO)); + assertThat(abcLogger.getLevel(), equalTo(Level.INFO)); } public void testCustomLevelPerClassAndPerMethod() throws Exception { @@ -105,49 +106,54 @@ public class LoggingListenerTests extends ESTestCase { Description suiteDescription = Description.createSuiteDescription(AnnotatedTestClass.class); - ESLogger abcLogger = Loggers.getLogger("abc"); - ESLogger xyzLogger = Loggers.getLogger("xyz"); + Logger abcLogger = Loggers.getLogger("abc"); + Logger xyzLogger = Loggers.getLogger("xyz"); - assertThat(xyzLogger.getLevel(), nullValue()); - assertThat(abcLogger.getLevel(), nullValue()); + assertThat(xyzLogger.getLevel(), 
equalTo(Level.INFO)); + assertThat(abcLogger.getLevel(), equalTo(Level.INFO)); loggingListener.testRunStarted(suiteDescription); - assertThat(abcLogger.getLevel(), equalTo("ERROR")); - assertThat(xyzLogger.getLevel(), nullValue()); + assertThat(xyzLogger.getLevel(), equalTo(Level.INFO)); + assertThat(abcLogger.getLevel(), equalTo(Level.WARN)); Method method = TestClass.class.getMethod("annotatedTestMethod"); TestLogging annotation = method.getAnnotation(TestLogging.class); Description testDescription = Description.createTestDescription(LoggingListenerTests.class, "annotatedTestMethod", annotation); loggingListener.testStarted(testDescription); - assertThat(abcLogger.getLevel(), equalTo("ERROR")); - assertThat(xyzLogger.getLevel(), equalTo("TRACE")); + assertThat(xyzLogger.getLevel(), equalTo(Level.TRACE)); + assertThat(abcLogger.getLevel(), equalTo(Level.WARN)); loggingListener.testFinished(testDescription); - assertThat(abcLogger.getLevel(), equalTo("ERROR")); - assertThat(xyzLogger.getLevel(), nullValue()); + assertThat(xyzLogger.getLevel(), equalTo(Level.INFO)); + assertThat(abcLogger.getLevel(), equalTo(Level.WARN)); Method method2 = TestClass.class.getMethod("annotatedTestMethod2"); TestLogging annotation2 = method2.getAnnotation(TestLogging.class); Description testDescription2 = Description.createTestDescription(LoggingListenerTests.class, "annotatedTestMethod2", annotation2); loggingListener.testStarted(testDescription2); - assertThat(abcLogger.getLevel(), equalTo("TRACE")); - assertThat(xyzLogger.getLevel(), equalTo("DEBUG")); + assertThat(xyzLogger.getLevel(), equalTo(Level.DEBUG)); + assertThat(abcLogger.getLevel(), equalTo(Level.TRACE)); loggingListener.testFinished(testDescription2); - assertThat(abcLogger.getLevel(), equalTo("ERROR")); - assertThat(xyzLogger.getLevel(), nullValue()); + assertThat(xyzLogger.getLevel(), equalTo(Level.INFO)); + assertThat(abcLogger.getLevel(), equalTo(Level.WARN)); loggingListener.testRunFinished(new Result()); - assertThat(abcLogger.getLevel(), nullValue()); - assertThat(xyzLogger.getLevel(), nullValue()); + assertThat(xyzLogger.getLevel(), equalTo(Level.INFO)); + assertThat(abcLogger.getLevel(), equalTo(Level.INFO)); } - @TestLogging("abc:ERROR") + /** + * dummy class used to create a junit suite description that has the @TestLogging annotation + */ + @TestLogging("abc:WARN") public static class AnnotatedTestClass { - //dummy class used to create a junit suite description that has the @TestLogging annotation + } + /** + * dummy class used to create a junit suite description that doesn't have the @TestLogging annotation, but its test methods have it + */ public static class TestClass { - //dummy class used to create a junit suite description that doesn't have the @TestLogging annotation, but its test methods have it @SuppressWarnings("unused") @TestLogging("xyz:TRACE") @@ -156,5 +162,7 @@ public class LoggingListenerTests extends ESTestCase { @SuppressWarnings("unused") @TestLogging("abc:TRACE,xyz:DEBUG") public void annotatedTestMethod2() {} + } + } diff --git a/test/logger-usage/build.gradle b/test/logger-usage/build.gradle index 1a5815cf76e..a0acd58cff4 100644 --- a/test/logger-usage/build.gradle +++ b/test/logger-usage/build.gradle @@ -24,7 +24,8 @@ dependencies { testCompile "org.elasticsearch.test:framework:${version}" } -loggerUsageCheck.enabled = false +// https://github.com/elastic/elasticsearch/issues/20243 +// loggerUsageCheck.enabled = false forbiddenApisMain.enabled = true // disabled by parent project forbiddenApisMain { diff --git 
a/test/logger-usage/src/test/java/org/elasticsearch/test/loggerusage/ESLoggerUsageTests.java b/test/logger-usage/src/test/java/org/elasticsearch/test/loggerusage/ESLoggerUsageTests.java index 73449f4351c..8d2d772d567 100644 --- a/test/logger-usage/src/test/java/org/elasticsearch/test/loggerusage/ESLoggerUsageTests.java +++ b/test/logger-usage/src/test/java/org/elasticsearch/test/loggerusage/ESLoggerUsageTests.java @@ -19,10 +19,12 @@ package org.elasticsearch.test.loggerusage; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.common.SuppressLoggerChecks; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.test.loggerusage.ESLoggerUsageChecker.WrongLoggerUsage; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.loggerusage.ESLoggerUsageChecker.WrongLoggerUsage; import java.io.IOException; import java.io.InputStream; @@ -37,6 +39,12 @@ import static org.hamcrest.Matchers.notNullValue; public class ESLoggerUsageTests extends ESTestCase { + // needed to prevent the test suite from failing for having no tests + public void testSoThatTestsDoNotFail() { + + } + + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/20243") public void testLoggerUsageChecks() throws IOException { for (Method method : getClass().getMethods()) { if (method.getDeclaringClass().equals(getClass())) { @@ -46,9 +54,9 @@ List errors = new ArrayList<>(); ESLoggerUsageChecker.check(errors::add, classInputStream, Predicate.isEqual(method.getName())); if (method.getName().startsWith("checkFail")) { - assertFalse("Expected " + method.getName() + " to have wrong ESLogger usage", errors.isEmpty()); + assertFalse("Expected " + method.getName() + " to have wrong Logger usage", errors.isEmpty()); } else { - assertTrue("Method " + method.getName() + " has unexpected ESLogger usage errors: " + errors, errors.isEmpty()); + assertTrue("Method " + method.getName() + " has unexpected Logger usage errors: " + errors, errors.isEmpty()); } } else { assertTrue("only allow methods starting with test or check in this class", method.getName().startsWith("test")); @@ -57,11 +65,12 @@ } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/20243") public void testLoggerUsageCheckerCompatibilityWithESLogger() throws NoSuchMethodException { - assertThat(ESLoggerUsageChecker.LOGGER_CLASS, equalTo(ESLogger.class.getName())); + assertThat(ESLoggerUsageChecker.LOGGER_CLASS, equalTo(Logger.class.getName())); assertThat(ESLoggerUsageChecker.THROWABLE_CLASS, equalTo(Throwable.class.getName())); int varargsMethodCount = 0; - for (Method method : ESLogger.class.getMethods()) { + for (Method method : Logger.class.getMethods()) { if (method.isVarArgs()) { // check that logger usage checks all varargs methods assertThat(ESLoggerUsageChecker.LOGGER_METHODS, hasItem(method.getName())); @@ -74,8 +83,8 @@ // check that signature is same as we expect in the usage checker for (String methodName : ESLoggerUsageChecker.LOGGER_METHODS) { - assertThat(ESLogger.class.getMethod(methodName, String.class, Object[].class), notNullValue()); - assertThat(ESLogger.class.getMethod(methodName, String.class, Throwable.class, Object[].class), notNullValue()); + assertThat(Logger.class.getMethod(methodName, String.class, 
Object[].class), notNullValue()); + assertThat(Logger.class.getMethod(methodName, String.class, Throwable.class, Object[].class), notNullValue()); } } @@ -114,7 +123,7 @@ public class ESLoggerUsageTests extends ESTestCase { } public void checkOrderOfExceptionArgument1() { - logger.info("Hello {}", new Exception(), "world"); + logger.info((Supplier) () -> new ParameterizedMessage("Hello {}", "world"), new Exception()); } public void checkFailOrderOfExceptionArgument1() { @@ -122,7 +131,7 @@ public class ESLoggerUsageTests extends ESTestCase { } public void checkOrderOfExceptionArgument2() { - logger.info("Hello {}, {}", new Exception(), "world", 42); + logger.info((Supplier) () -> new ParameterizedMessage("Hello {}, {}", "world", 42), new Exception()); } public void checkFailOrderOfExceptionArgument2() { @@ -134,7 +143,7 @@ public class ESLoggerUsageTests extends ESTestCase { } public void checkFailNonConstantMessageWithArguments(boolean b) { - logger.info(Boolean.toString(b), new Exception(), 42); + logger.info((Supplier) () -> new ParameterizedMessage(Boolean.toString(b), 42), new Exception()); } public void checkComplexUsage(boolean b) { @@ -166,4 +175,5 @@ public class ESLoggerUsageTests extends ESTestCase { } logger.info(message, args); } + }
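The checkOrderOfExceptionArgument methods above encode the rule the usage checker enforces after the migration: the exception may never sit between the pattern and its {} arguments. ParameterizedMessage performs the {} substitution itself, which can be verified in isolation with a short standalone sketch (not part of the patch):

import org.apache.logging.log4j.message.ParameterizedMessage;

public class ParameterizedMessageSketch {
    public static void main(String[] args) {
        // the same pattern/argument pairing used in checkOrderOfExceptionArgument2
        ParameterizedMessage message = new ParameterizedMessage("Hello {}, {}", "world", 42);
        System.out.println(message.getFormattedMessage()); // prints: Hello world, 42
    }
}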