diff --git a/.github/CONTRIBUTING.md b/CONTRIBUTING.md similarity index 100% rename from .github/CONTRIBUTING.md rename to CONTRIBUTING.md diff --git a/build.gradle b/build.gradle index b419bf01e15..6ab00d73881 100644 --- a/build.gradle +++ b/build.gradle @@ -116,6 +116,7 @@ subprojects { "org.elasticsearch.distribution.tar:elasticsearch:${version}": ':distribution:tar', "org.elasticsearch.distribution.rpm:elasticsearch:${version}": ':distribution:rpm', "org.elasticsearch.distribution.deb:elasticsearch:${version}": ':distribution:deb', + "org.elasticsearch.test:logger-usage:${version}": ':test:logger-usage', ] configurations.all { resolutionStrategy.dependencySubstitution { DependencySubstitutions subs -> diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LoggerUsageTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LoggerUsageTask.groovy new file mode 100644 index 00000000000..b280a74db58 --- /dev/null +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LoggerUsageTask.groovy @@ -0,0 +1,98 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.gradle.precommit + +import org.elasticsearch.gradle.LoggedExec +import org.gradle.api.file.FileCollection +import org.gradle.api.tasks.InputFiles +import org.gradle.api.tasks.OutputFile + +/** + * Runs LoggerUsageCheck on a set of directories. + */ +public class LoggerUsageTask extends LoggedExec { + + /** + * We use a simple "marker" file that we touch when the task succeeds + * as the task output. This is compared against the modified time of the + * inputs (ie the jars/class files). 
+ */ + private File successMarker = new File(project.buildDir, 'markers/loggerUsage') + + private FileCollection classpath; + + private List classDirectories; + + public LoggerUsageTask() { + project.afterEvaluate { + dependsOn(classpath) + description = "Runs LoggerUsageCheck on ${classDirectories}" + executable = new File(project.javaHome, 'bin/java') + if (classDirectories == null) { + classDirectories = [] + if (project.sourceSets.findByName("main") && project.sourceSets.main.output.classesDir.exists()) { + classDirectories += [project.sourceSets.main.output.classesDir] + dependsOn project.tasks.classes + } + if (project.sourceSets.findByName("test") && project.sourceSets.test.output.classesDir.exists()) { + classDirectories += [project.sourceSets.test.output.classesDir] + dependsOn project.tasks.testClasses + } + } + doFirst({ + args('-cp', getClasspath().asPath, 'org.elasticsearch.test.loggerusage.ESLoggerUsageChecker') + getClassDirectories().each { + args it.getAbsolutePath() + } + }) + doLast({ + successMarker.parentFile.mkdirs() + successMarker.setText("", 'UTF-8') + }) + } + } + + @InputFiles + FileCollection getClasspath() { + return classpath + } + + void setClasspath(FileCollection classpath) { + this.classpath = classpath + } + + @InputFiles + List getClassDirectories() { + return classDirectories + } + + void setClassDirectories(List classDirectories) { + this.classDirectories = classDirectories + } + + @OutputFile + File getSuccessMarker() { + return successMarker + } + + void setSuccessMarker(File successMarker) { + this.successMarker = successMarker + } +} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy index ab524351274..cbd72f2c7da 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy @@ -34,6 +34,7 @@ class PrecommitTasks { configureForbiddenApis(project), configureCheckstyle(project), configureNamingConventions(project), + configureLoggerUsage(project), project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class), project.tasks.create('licenseHeaders', LicenseHeadersTask.class), project.tasks.create('jarHell', JarHellTask.class), @@ -64,20 +65,21 @@ class PrecommitTasks { internalRuntimeForbidden = true failOnUnsupportedJava = false bundledSignatures = ['jdk-unsafe', 'jdk-deprecated'] - signaturesURLs = [getClass().getResource('/forbidden/all-signatures.txt')] + signaturesURLs = [getClass().getResource('/forbidden/jdk-signatures.txt'), + getClass().getResource('/forbidden/es-all-signatures.txt')] suppressAnnotations = ['**.SuppressForbidden'] } Task mainForbidden = project.tasks.findByName('forbiddenApisMain') if (mainForbidden != null) { mainForbidden.configure { bundledSignatures += 'jdk-system-out' - signaturesURLs += getClass().getResource('/forbidden/core-signatures.txt') + signaturesURLs += getClass().getResource('/forbidden/es-core-signatures.txt') } } Task testForbidden = project.tasks.findByName('forbiddenApisTest') if (testForbidden != null) { testForbidden.configure { - signaturesURLs += getClass().getResource('/forbidden/test-signatures.txt') + signaturesURLs += getClass().getResource('/forbidden/es-test-signatures.txt') } } Task forbiddenApis = project.tasks.findByName('forbiddenApis') @@ -117,4 +119,18 @@ class PrecommitTasks { } return null } + + private static Task 
configureLoggerUsage(Project project) { + Task loggerUsageTask = project.tasks.create('loggerUsageCheck', LoggerUsageTask.class) + + project.configurations.create('loggerUsagePlugin') + project.dependencies.add('loggerUsagePlugin', + "org.elasticsearch.test:logger-usage:${org.elasticsearch.gradle.VersionProperties.elasticsearch}") + + loggerUsageTask.configure { + classpath = project.configurations.loggerUsagePlugin + } + + return loggerUsageTask + } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy index c9db5657ba4..3e8b6225329 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy @@ -49,6 +49,15 @@ class ClusterConfiguration { @Input String jvmArgs = System.getProperty('tests.jvm.argline', '') + /** + * The seed node's ports file. In the case the cluster has more than one node we use a seed node + * to form the cluster. The file is null if there is no seed node yet available. + * + * Note: this can only be null if the cluster has only one node or if the first node is not yet + * configured. All nodes but the first node should see a non-null value. + */ + File seedNodePortsFile + /** * A closure to call before the cluster is considered ready. The closure is passed the node info, * as well as a groovy AntBuilder, to enable running ant condition checks. The default wait @@ -119,4 +128,12 @@ } extraConfigFiles.put(path, sourceFile) } + + /** Returns an address and port suitable for a uri to connect to this cluster's seed node over the transport protocol */ + String seedNodeTransportUri() { + if (seedNodePortsFile != null) { + return seedNodePortsFile.readLines("UTF-8").get(0) + } + return null; + } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index d96ee511051..59a27ea36bd 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -58,6 +58,13 @@ class ClusterFormationTasks { List nodes = [] for (int i = 0; i < config.numNodes; ++i) { NodeInfo node = new NodeInfo(config, i, project, task) + if (i == 0) { + if (config.seedNodePortsFile != null) { + // we might allow this in the future to be set but for now we are the only authority to set this! + throw new GradleException("seedNodePortsFile has a non-null value but first node has not been initialized") + } + config.seedNodePortsFile = node.transportPortsFile; + } nodes.add(node) startTasks.add(configureNode(project, task, node)) } @@ -220,20 +227,22 @@ 'node.testattr' : 'test', 'repositories.url.allowed_urls': 'http://snapshot.test*' ] - if (node.config.numNodes == 1) { - esConfig['http.port'] = node.config.httpPort - esConfig['transport.tcp.port'] = node.config.transportPort - } else { - // TODO: fix multi node so it doesn't use hardcoded prots - esConfig['http.port'] = 9400 + node.nodeNum - esConfig['transport.tcp.port'] = 9500 + node.nodeNum - esConfig['discovery.zen.ping.unicast.hosts'] = (0.. 
0) { // multi-node cluster case, we have to wait for the seed node to start up + ant.waitfor(maxwait: '20', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond') { + resourceexists { + file(file: node.config.seedNodePortsFile.toString()) + } + } + // the seed node is enough to form the cluster - all subsequent nodes will get the seed node as a unicast + // host and join the cluster via that. + esConfig['discovery.zen.ping.unicast.hosts'] = "\"${node.config.seedNodeTransportUri()}\"" + } File configFile = new File(node.confDir, 'elasticsearch.yml') logger.info("Configuring ${configFile}") configFile.setText(esConfig.collect { key, value -> "${key}: ${value}" }.join('\n'), 'UTF-8') diff --git a/buildSrc/src/main/resources/forbidden/es-all-signatures.txt b/buildSrc/src/main/resources/forbidden/es-all-signatures.txt new file mode 100644 index 00000000000..d258c098911 --- /dev/null +++ b/buildSrc/src/main/resources/forbidden/es-all-signatures.txt @@ -0,0 +1,30 @@ +# Licensed to Elasticsearch under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on +# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied. See the License for the specific +# language governing permissions and limitations under the License. + +java.nio.file.Paths @ Use org.elasticsearch.common.io.PathUtils.get() instead. +java.nio.file.FileSystems#getDefault() @ use org.elasticsearch.common.io.PathUtils.getDefaultFileSystem() instead. 
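As an editorial aside: each entry in these signature files names an API that the forbidden-apis check rejects at build time, with the text after the @ surfaced as the error message. A minimal sketch of the call-site pattern the entries above enforce, assuming PathUtils exposes the get() overload its message implies:

import java.nio.file.Path;
import org.elasticsearch.common.io.PathUtils;

class PathExample {
    Path configDir() {
        // Calling java.nio.file.Paths.get("config") here would trip the java.nio.file.Paths entry above;
        // its message points to PathUtils.get() instead (assumed overload: get(String first, String... more)).
        return PathUtils.get("config");
    }
}
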
+ +java.nio.file.Files#getFileStore(java.nio.file.Path) @ Use org.elasticsearch.env.Environment.getFileStore() instead, impacted by JDK-8034057 +java.nio.file.Files#isWritable(java.nio.file.Path) @ Use org.elasticsearch.env.Environment.isWritable() instead, impacted by JDK-8034057 + +@defaultMessage Use org.elasticsearch.common.Randomness#get for reproducible sources of randomness +java.util.Random#<init>() +java.util.concurrent.ThreadLocalRandom + +java.security.MessageDigest#clone() @ use org.elasticsearch.common.hash.MessageDigests + +@defaultMessage this should not have been added to lucene in the first place +org.apache.lucene.index.IndexReader#getCombinedCoreAndDeletesKey() diff --git a/buildSrc/src/main/resources/forbidden/core-signatures.txt b/buildSrc/src/main/resources/forbidden/es-core-signatures.txt similarity index 100% rename from buildSrc/src/main/resources/forbidden/core-signatures.txt rename to buildSrc/src/main/resources/forbidden/es-core-signatures.txt diff --git a/buildSrc/src/main/resources/forbidden/test-signatures.txt b/buildSrc/src/main/resources/forbidden/es-test-signatures.txt similarity index 100% rename from buildSrc/src/main/resources/forbidden/test-signatures.txt rename to buildSrc/src/main/resources/forbidden/es-test-signatures.txt diff --git a/buildSrc/src/main/resources/forbidden/all-signatures.txt b/buildSrc/src/main/resources/forbidden/jdk-signatures.txt similarity index 85% rename from buildSrc/src/main/resources/forbidden/all-signatures.txt rename to buildSrc/src/main/resources/forbidden/jdk-signatures.txt index 9bc37005514..994b1ad3a4a 100644 --- a/buildSrc/src/main/resources/forbidden/all-signatures.txt +++ b/buildSrc/src/main/resources/forbidden/jdk-signatures.txt @@ -33,9 +33,6 @@ java.util.Formatter#<init>(java.lang.String,java.lang.String,java.util.Locale) java.io.RandomAccessFile java.nio.file.Path#toFile() -java.nio.file.Paths @ Use org.elasticsearch.common.io.PathUtils.get() instead. -java.nio.file.FileSystems#getDefault() @ use org.elasticsearch.common.io.PathUtils.getDefaultFileSystem() instead. - @defaultMessage Specify a location for the temp file/directory instead. java.nio.file.Files#createTempDirectory(java.lang.String,java.nio.file.attribute.FileAttribute[]) java.nio.file.Files#createTempFile(java.lang.String,java.lang.String,java.nio.file.attribute.FileAttribute[]) @@ -48,9 +45,6 @@ java.io.ObjectInput java.nio.file.Files#isHidden(java.nio.file.Path) @ Dependent on the operating system, use FileSystemUtils.isHidden instead -java.nio.file.Files#getFileStore(java.nio.file.Path) @ Use org.elasticsearch.env.Environment.getFileStore() instead, impacted by JDK-8034057 -java.nio.file.Files#isWritable(java.nio.file.Path) @ Use org.elasticsearch.env.Environment.isWritable() instead, impacted by JDK-8034057 - @defaultMessage Resolve hosts explicitly to the address(es) you want with InetAddress. 
java.net.InetSocketAddress#<init>(java.lang.String,int) java.net.Socket#<init>(java.lang.String,int) @@ -89,9 +83,6 @@ java.lang.Class#getDeclaredMethods() @ Do not violate java's access system: Use java.lang.reflect.AccessibleObject#setAccessible(boolean) java.lang.reflect.AccessibleObject#setAccessible(java.lang.reflect.AccessibleObject[], boolean) -@defaultMessage this should not have been added to lucene in the first place -org.apache.lucene.index.IndexReader#getCombinedCoreAndDeletesKey() - @defaultMessage this method needs special permission java.lang.Thread#getAllStackTraces() @@ -112,8 +103,3 @@ java.util.Collections#EMPTY_MAP java.util.Collections#EMPTY_SET java.util.Collections#shuffle(java.util.List) @ Use java.util.Collections#shuffle(java.util.List, java.util.Random) with a reproducible source of randomness -@defaultMessage Use org.elasticsearch.common.Randomness#get for reproducible sources of randomness -java.util.Random#<init>() -java.util.concurrent.ThreadLocalRandom - -java.security.MessageDigest#clone() @ use org.elasticsearch.common.hash.MessageDigests diff --git a/buildSrc/version.properties b/buildSrc/version.properties index f75d5a936bb..39c32192052 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ elasticsearch = 5.0.0 -lucene = 6.0.0-snapshot-bea235f +lucene = 6.0.0-snapshot-f0aa4fc # optional dependencies spatial4j = 0.6 diff --git a/core/build.gradle b/core/build.gradle index 39c1e4367c0..ab3754e72ff 100644 --- a/core/build.gradle +++ b/core/build.gradle @@ -49,7 +49,7 @@ dependencies { compile 'org.elasticsearch:securesm:1.0' // utilities - compile 'commons-cli:commons-cli:1.3.1' + compile 'net.sf.jopt-simple:jopt-simple:4.9' compile 'com.carrotsearch:hppc:0.7.1' // time handling, remove with java 8 time diff --git a/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java b/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java index a7c53a56bc4..6ddd7591caa 100644 --- a/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java +++ b/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java @@ -787,8 +787,9 @@ public class MapperQueryParser extends QueryParser { assert q instanceof BoostQuery == false; return pq; } else if (q instanceof MultiPhraseQuery) { - ((MultiPhraseQuery) q).setSlop(slop); - return q; + MultiPhraseQuery.Builder builder = new MultiPhraseQuery.Builder((MultiPhraseQuery) q); + builder.setSlop(slop); + return builder.build(); } else { return q; } diff --git a/core/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java b/core/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java index 089b649cefe..3c0bda97347 100644 --- a/core/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java +++ b/core/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java @@ -34,7 +34,6 @@ import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery; import java.io.IOException; import java.util.Collection; -import java.util.List; /** * @@ -68,7 +67,7 @@ public class CustomFieldQuery extends FieldQuery { flatten(((FiltersFunctionScoreQuery) sourceQuery).getSubQuery(), reader, flatQueries, boost); } else if (sourceQuery instanceof MultiPhraseQuery) { MultiPhraseQuery q = ((MultiPhraseQuery) sourceQuery); - convertMultiPhraseQuery(0, new int[q.getTermArrays().size()], q, q.getTermArrays(), q.getPositions(), reader, flatQueries); + convertMultiPhraseQuery(0, 
new int[q.getTermArrays().length], q, q.getTermArrays(), q.getPositions(), reader, flatQueries); } else if (sourceQuery instanceof BlendedTermQuery) { final BlendedTermQuery blendedTermQuery = (BlendedTermQuery) sourceQuery; flatten(blendedTermQuery.rewrite(reader), reader, flatQueries, boost); @@ -77,7 +76,7 @@ public class CustomFieldQuery extends FieldQuery { } } - private void convertMultiPhraseQuery(int currentPos, int[] termsIdx, MultiPhraseQuery orig, List terms, int[] pos, IndexReader reader, Collection flatQueries) throws IOException { + private void convertMultiPhraseQuery(int currentPos, int[] termsIdx, MultiPhraseQuery orig, Term[][] terms, int[] pos, IndexReader reader, Collection flatQueries) throws IOException { if (currentPos == 0) { // if we have more than 16 terms int numTerms = 0; @@ -97,16 +96,16 @@ public class CustomFieldQuery extends FieldQuery { * we walk all possible ways and for each path down the MPQ we create a PhraseQuery this is what FieldQuery supports. * It seems expensive but most queries will pretty small. */ - if (currentPos == terms.size()) { + if (currentPos == terms.length) { PhraseQuery.Builder queryBuilder = new PhraseQuery.Builder(); queryBuilder.setSlop(orig.getSlop()); for (int i = 0; i < termsIdx.length; i++) { - queryBuilder.add(terms.get(i)[termsIdx[i]], pos[i]); + queryBuilder.add(terms[i][termsIdx[i]], pos[i]); } Query query = queryBuilder.build(); this.flatten(query, reader, flatQueries, 1F); } else { - Term[] t = terms.get(currentPos); + Term[] t = terms[currentPos]; for (int i = 0; i < t.length; i++) { termsIdx[currentPos] = i; convertMultiPhraseQuery(currentPos+1, termsIdx, orig, terms, pos, reader, flatQueries); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java index f8bbebf7db8..5c88a8be3d3 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java @@ -33,6 +33,7 @@ import org.elasticsearch.cluster.metadata.MetaDataIndexStateService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -46,7 +47,8 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction CLUSTER_INDICES_CLOSE_ENABLE_SETTING = Setting.boolSetting("cluster.indices.close.enable", true, true, Setting.Scope.CLUSTER); + public static final Setting CLUSTER_INDICES_CLOSE_ENABLE_SETTING = + Setting.boolSetting("cluster.indices.close.enable", true, Property.Dynamic, Property.NodeScope); @Inject public TransportCloseIndexAction(Settings settings, TransportService transportService, ClusterService clusterService, diff --git a/core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java b/core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java index d5574755346..339abcb22bc 100644 --- a/core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java +++ b/core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.collect.Tuple; import 
org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.mapper.MapperService; @@ -39,7 +40,8 @@ import java.util.List; */ public final class AutoCreateIndex { - public static final Setting AUTO_CREATE_INDEX_SETTING = new Setting<>("action.auto_create_index", "true", AutoCreate::new, false, Setting.Scope.CLUSTER); + public static final Setting AUTO_CREATE_INDEX_SETTING = + new Setting<>("action.auto_create_index", "true", AutoCreate::new, Property.NodeScope); private final boolean dynamicMappingDisabled; private final IndexNameExpressionResolver resolver; diff --git a/core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java b/core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java index 5f2fb33e043..31fc1d06175 100644 --- a/core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java +++ b/core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; /** @@ -33,7 +34,8 @@ public final class DestructiveOperations extends AbstractComponent { /** * Setting which controls whether wildcard usage (*, prefix*, _all) is allowed. */ - public static final Setting REQUIRES_NAME_SETTING = Setting.boolSetting("action.destructive_requires_name", false, true, Setting.Scope.CLUSTER); + public static final Setting REQUIRES_NAME_SETTING = + Setting.boolSetting("action.destructive_requires_name", false, Property.Dynamic, Property.NodeScope); private volatile boolean destructiveRequiresName; @Inject diff --git a/core/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeReadAction.java b/core/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeReadAction.java index ed08e5bdba3..f53355f24e3 100644 --- a/core/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeReadAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeReadAction.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -37,7 +38,8 @@ import java.util.function.Supplier; public abstract class TransportMasterNodeReadAction, Response extends ActionResponse> extends TransportMasterNodeAction { - public static final Setting FORCE_LOCAL_SETTING = Setting.boolSetting("action.master.force_local", false, false, Setting.Scope.CLUSTER); + public static final Setting FORCE_LOCAL_SETTING = + Setting.boolSetting("action.master.force_local", false, Property.NodeScope); private final boolean forceLocal; diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java 
b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index 18a7e5e0705..69df4e61787 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -270,7 +270,7 @@ public abstract class TransportReplicationAction entry : properties.entrySet()) { - String key = (String) entry.getKey(); - String propertyName = key.startsWith("es.") ? key : "es." + key; - System.setProperty(propertyName, entry.getValue().toString()); - } - } - - // hacky way to extract all the fancy extra args, there is no CLI tool helper for this - Iterator iterator = cli.getArgList().iterator(); - final Map properties = new HashMap<>(); - while (iterator.hasNext()) { - String arg = iterator.next(); - if (!arg.startsWith("--")) { - if (arg.startsWith("-D") || arg.startsWith("-d") || arg.startsWith("-p")) { - throw new UserError(ExitStatus.USAGE, - "Parameter [" + arg + "] starting with \"-D\", \"-d\" or \"-p\" must be before any parameters starting with --" - ); - } else { - throw new UserError(ExitStatus.USAGE, "Parameter [" + arg + "]does not start with --"); - } - } - // if there is no = sign, we have to get the next argu - arg = arg.replace("--", ""); - if (arg.contains("=")) { - String[] splitArg = arg.split("=", 2); - String key = splitArg[0]; - String value = splitArg[1]; - properties.put("es." + key, value); - } else { - if (iterator.hasNext()) { - String value = iterator.next(); - if (value.startsWith("--")) { - throw new UserError(ExitStatus.USAGE, "Parameter [" + arg + "] needs value"); - } - properties.put("es." + arg, value); - } else { - throw new UserError(ExitStatus.USAGE, "Parameter [" + arg + "] needs value"); - } - } - } - for (Map.Entry entry : properties.entrySet()) { - System.setProperty(entry.getKey(), entry.getValue()); - } - return new Start(terminal); - } - - public Start(Terminal terminal) { - super(terminal); - - } - - @Override - public ExitStatus execute(Settings settings, Environment env) throws Exception { - return ExitStatus.OK; - } - } - -} diff --git a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCliParser.java b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCliParser.java new file mode 100644 index 00000000000..5c927305f14 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCliParser.java @@ -0,0 +1,95 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.bootstrap; + +import java.util.Arrays; + +import joptsimple.OptionSet; +import joptsimple.OptionSpec; +import org.elasticsearch.Build; +import org.elasticsearch.cli.Command; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.UserError; +import org.elasticsearch.common.Strings; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.monitor.jvm.JvmInfo; + +final class BootstrapCliParser extends Command { + + private final OptionSpec versionOption; + private final OptionSpec daemonizeOption; + private final OptionSpec pidfileOption; + private final OptionSpec propertyOption; + private boolean shouldRun = false; + + BootstrapCliParser() { + super("Starts elasticsearch"); + // TODO: in jopt-simple 5.0, make this mutually exclusive with all other options + versionOption = parser.acceptsAll(Arrays.asList("V", "version"), + "Prints elasticsearch version information and exits"); + daemonizeOption = parser.acceptsAll(Arrays.asList("d", "daemonize"), + "Starts Elasticsearch in the background"); + // TODO: in jopt-simple 5.0 this option type can be a Path + pidfileOption = parser.acceptsAll(Arrays.asList("p", "pidfile"), + "Creates a pid file in the specified path on start") + .withRequiredArg(); + propertyOption = parser.accepts("D", "Configures an Elasticsearch setting") + .withRequiredArg(); + } + + // TODO: don't use system properties as a way to do this, it's horrible... + @SuppressForbidden(reason = "Sets system properties passed as CLI parameters") + @Override + protected void execute(Terminal terminal, OptionSet options) throws Exception { + if (options.has(versionOption)) { + terminal.println("Version: " + org.elasticsearch.Version.CURRENT + + ", Build: " + Build.CURRENT.shortHash() + "/" + Build.CURRENT.date() + + ", JVM: " + JvmInfo.jvmInfo().version()); + return; + } + + // TODO: don't use sysprops for any of these! pass the args through to bootstrap... + if (options.has(daemonizeOption)) { + System.setProperty("es.foreground", "false"); + } + String pidFile = pidfileOption.value(options); + if (Strings.isNullOrEmpty(pidFile) == false) { + System.setProperty("es.pidfile", pidFile); + } + + for (String property : propertyOption.values(options)) { + String[] keyValue = property.split("=", 2); + if (keyValue.length != 2) { + throw new UserError(ExitCodes.USAGE, "Malformed elasticsearch setting, must be of the form key=value"); + } + String key = keyValue[0]; + if (key.startsWith("es.") == false) { + key = "es." 
+ key; + } + System.setProperty(key, keyValue[1]); + } + shouldRun = true; + } + + boolean shouldRun() { + return shouldRun; + } +} diff --git a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapSettings.java b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapSettings.java index a20ff9bb059..4e9dffc995b 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapSettings.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapSettings.java @@ -20,7 +20,7 @@ package org.elasticsearch.bootstrap; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Scope; +import org.elasticsearch.common.settings.Setting.Property; public final class BootstrapSettings { @@ -29,10 +29,13 @@ public final class BootstrapSettings { // TODO: remove this hack when insecure defaults are removed from java public static final Setting SECURITY_FILTER_BAD_DEFAULTS_SETTING = - Setting.boolSetting("security.manager.filter_bad_defaults", true, false, Scope.CLUSTER); + Setting.boolSetting("security.manager.filter_bad_defaults", true, Property.NodeScope); - public static final Setting MLOCKALL_SETTING = Setting.boolSetting("bootstrap.mlockall", false, false, Scope.CLUSTER); - public static final Setting SECCOMP_SETTING = Setting.boolSetting("bootstrap.seccomp", true, false, Scope.CLUSTER); - public static final Setting CTRLHANDLER_SETTING = Setting.boolSetting("bootstrap.ctrlhandler", true, false, Scope.CLUSTER); + public static final Setting MLOCKALL_SETTING = + Setting.boolSetting("bootstrap.mlockall", false, Property.NodeScope); + public static final Setting SECCOMP_SETTING = + Setting.boolSetting("bootstrap.seccomp", true, Property.NodeScope); + public static final Setting CTRLHANDLER_SETTING = + Setting.boolSetting("bootstrap.ctrlhandler", true, Property.NodeScope); } diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java b/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java index 107a955696c..3b95c3f4a6f 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java @@ -32,7 +32,7 @@ public final class Elasticsearch { /** * Main entry point for starting elasticsearch */ - public static void main(String[] args) throws StartupError { + public static void main(String[] args) throws Exception { try { Bootstrap.init(args); } catch (Throwable t) { diff --git a/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java b/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java index b9d5ce11dbc..0ea8da6a9be 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java @@ -76,7 +76,7 @@ class JNANatives { softLimit = rlimit.rlim_cur.longValue(); hardLimit = rlimit.rlim_max.longValue(); } else { - logger.warn("Unable to retrieve resource limits: " + JNACLibrary.strerror(Native.getLastError())); + logger.warn("Unable to retrieve resource limits: {}", JNACLibrary.strerror(Native.getLastError())); } } } catch (UnsatisfiedLinkError e) { @@ -85,19 +85,20 @@ class JNANatives { } // mlockall failed for some reason - logger.warn("Unable to lock JVM Memory: error=" + errno + ",reason=" + errMsg); + logger.warn("Unable to lock JVM Memory: error={}, reason={}", errno , errMsg); logger.warn("This can result in part of the JVM being swapped out."); if (errno == JNACLibrary.ENOMEM) { if (rlimitSuccess) { - logger.warn("Increase RLIMIT_MEMLOCK, soft 
limit: " + rlimitToString(softLimit) + ", hard limit: " + rlimitToString(hardLimit)); + logger.warn("Increase RLIMIT_MEMLOCK, soft limit: {}, hard limit: {}", rlimitToString(softLimit), rlimitToString(hardLimit)); if (Constants.LINUX) { // give specific instructions for the linux case to make it easy String user = System.getProperty("user.name"); logger.warn("These can be adjusted by modifying /etc/security/limits.conf, for example: \n" + - "\t# allow user '" + user + "' mlockall\n" + - "\t" + user + " soft memlock unlimited\n" + - "\t" + user + " hard memlock unlimited" - ); + "\t# allow user '{}' mlockall\n" + + "\t{} soft memlock unlimited\n" + + "\t{} hard memlock unlimited", + user, user, user + ); logger.warn("If you are logged in interactively, you will have to re-login for the new limits to take effect."); } } else { @@ -155,7 +156,7 @@ class JNANatives { // the amount of memory we wish to lock, plus a small overhead (1MB). SizeT size = new SizeT(JvmInfo.jvmInfo().getMem().getHeapInit().getBytes() + (1024 * 1024)); if (!kernel.SetProcessWorkingSetSize(process, size, size)) { - logger.warn("Unable to lock JVM memory. Failed to set working set size. Error code " + Native.getLastError()); + logger.warn("Unable to lock JVM memory. Failed to set working set size. Error code {}", Native.getLastError()); } else { JNAKernel32Library.MemoryBasicInformation memInfo = new JNAKernel32Library.MemoryBasicInformation(); long address = 0; @@ -188,7 +189,7 @@ class JNANatives { if (result) { logger.debug("console ctrl handler correctly set"); } else { - logger.warn("unknown error " + Native.getLastError() + " when adding console ctrl handler:"); + logger.warn("unknown error {} when adding console ctrl handler", Native.getLastError()); } } catch (UnsatisfiedLinkError e) { // this will have already been logged by Kernel32Library, no need to repeat it diff --git a/core/src/main/java/org/elasticsearch/bootstrap/JVMCheck.java b/core/src/main/java/org/elasticsearch/bootstrap/JVMCheck.java index 3f81cd035bd..86629e4fa36 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/JVMCheck.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/JVMCheck.java @@ -200,7 +200,7 @@ final class JVMCheck { HotSpotCheck bug = JVM_BROKEN_HOTSPOT_VERSIONS.get(Constants.JVM_VERSION); if (bug != null && bug.check()) { if (bug.getWorkaround().isPresent() && ManagementFactory.getRuntimeMXBean().getInputArguments().contains(bug.getWorkaround().get())) { - Loggers.getLogger(JVMCheck.class).warn(bug.getWarningMessage().get()); + Loggers.getLogger(JVMCheck.class).warn("{}", bug.getWarningMessage().get()); } else { throw new RuntimeException(bug.getErrorMessage()); } diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Seccomp.java b/core/src/main/java/org/elasticsearch/bootstrap/Seccomp.java index 4325c5b7aef..46908e60642 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Seccomp.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Seccomp.java @@ -394,7 +394,7 @@ final class Seccomp { method = 0; int errno1 = Native.getLastError(); if (logger.isDebugEnabled()) { - logger.debug("seccomp(SECCOMP_SET_MODE_FILTER): " + JNACLibrary.strerror(errno1) + ", falling back to prctl(PR_SET_SECCOMP)..."); + logger.debug("seccomp(SECCOMP_SET_MODE_FILTER): {}, falling back to prctl(PR_SET_SECCOMP)...", JNACLibrary.strerror(errno1)); } if (linux_prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, pointer, 0, 0) != 0) { int errno2 = Native.getLastError(); diff --git 
a/core/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java b/core/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java index a293428192b..2fad8678649 100644 --- a/core/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java +++ b/core/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java @@ -19,13 +19,13 @@ package org.elasticsearch.cache.recycler; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.recycler.AbstractRecyclerC; import org.elasticsearch.common.recycler.Recycler; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; @@ -43,13 +43,19 @@ import static org.elasticsearch.common.recycler.Recyclers.none; /** A recycler of fixed-size pages. */ public class PageCacheRecycler extends AbstractComponent implements Releasable { - public static final Setting TYPE_SETTING = new Setting<>("cache.recycler.page.type", Type.CONCURRENT.name(), Type::parse, false, Setting.Scope.CLUSTER); - public static final Setting LIMIT_HEAP_SETTING = Setting.byteSizeSetting("cache.recycler.page.limit.heap", "10%", false, Setting.Scope.CLUSTER); - public static final Setting WEIGHT_BYTES_SETTING = Setting.doubleSetting("cache.recycler.page.weight.bytes", 1d, 0d, false, Setting.Scope.CLUSTER); - public static final Setting WEIGHT_LONG_SETTING = Setting.doubleSetting("cache.recycler.page.weight.longs", 1d, 0d, false, Setting.Scope.CLUSTER); - public static final Setting WEIGHT_INT_SETTING = Setting.doubleSetting("cache.recycler.page.weight.ints", 1d, 0d, false, Setting.Scope.CLUSTER); + public static final Setting TYPE_SETTING = + new Setting<>("cache.recycler.page.type", Type.CONCURRENT.name(), Type::parse, Property.NodeScope); + public static final Setting LIMIT_HEAP_SETTING = + Setting.byteSizeSetting("cache.recycler.page.limit.heap", "10%", Property.NodeScope); + public static final Setting WEIGHT_BYTES_SETTING = + Setting.doubleSetting("cache.recycler.page.weight.bytes", 1d, 0d, Property.NodeScope); + public static final Setting WEIGHT_LONG_SETTING = + Setting.doubleSetting("cache.recycler.page.weight.longs", 1d, 0d, Property.NodeScope); + public static final Setting WEIGHT_INT_SETTING = + Setting.doubleSetting("cache.recycler.page.weight.ints", 1d, 0d, Property.NodeScope); // object pages are less useful to us so we give them a lower weight by default - public static final Setting WEIGHT_OBJECTS_SETTING = Setting.doubleSetting("cache.recycler.page.weight.objects", 0.1d, 0d, false, Setting.Scope.CLUSTER); + public static final Setting WEIGHT_OBJECTS_SETTING = + Setting.doubleSetting("cache.recycler.page.weight.objects", 0.1d, 0d, Property.NodeScope); private final Recycler bytePage; private final Recycler intPage; diff --git a/core/src/main/java/org/elasticsearch/cli/Command.java b/core/src/main/java/org/elasticsearch/cli/Command.java new file mode 100644 index 00000000000..9e6afdd6638 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/cli/Command.java @@ -0,0 +1,112 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cli; + +import java.io.IOException; +import java.util.Arrays; + +import joptsimple.OptionException; +import joptsimple.OptionParser; +import joptsimple.OptionSet; +import joptsimple.OptionSpec; +import org.elasticsearch.common.SuppressForbidden; + +/** + * An action to execute within a cli. + */ +public abstract class Command { + + /** A description of the command, used in the help output. */ + protected final String description; + + /** The option parser for this command. */ + protected final OptionParser parser = new OptionParser(); + + private final OptionSpec helpOption = parser.acceptsAll(Arrays.asList("h", "help"), "show help").forHelp(); + private final OptionSpec silentOption = parser.acceptsAll(Arrays.asList("s", "silent"), "show minimal output"); + private final OptionSpec verboseOption = parser.acceptsAll(Arrays.asList("v", "verbose"), "show verbose output"); + + public Command(String description) { + this.description = description; + } + + /** Parses options for this command from args and executes it. */ + public final int main(String[] args, Terminal terminal) throws Exception { + try { + mainWithoutErrorHandling(args, terminal); + } catch (OptionException e) { + printHelp(terminal); + terminal.println(Terminal.Verbosity.SILENT, "ERROR: " + e.getMessage()); + return ExitCodes.USAGE; + } catch (UserError e) { + terminal.println(Terminal.Verbosity.SILENT, "ERROR: " + e.getMessage()); + return e.exitCode; + } + return ExitCodes.OK; + } + + /** + * Executes the command, but all errors are thrown. + */ + void mainWithoutErrorHandling(String[] args, Terminal terminal) throws Exception { + final OptionSet options = parser.parse(args); + + if (options.has(helpOption)) { + printHelp(terminal); + return; + } + + if (options.has(silentOption)) { + if (options.has(verboseOption)) { + // mutually exclusive, we can remove this with jopt-simple 5.0, which natively supports it + throw new UserError(ExitCodes.USAGE, "Cannot specify -s and -v together"); + } + terminal.setVerbosity(Terminal.Verbosity.SILENT); + } else if (options.has(verboseOption)) { + terminal.setVerbosity(Terminal.Verbosity.VERBOSE); + } else { + terminal.setVerbosity(Terminal.Verbosity.NORMAL); + } + + execute(terminal, options); + } + + /** Prints a help message for the command to the terminal. 
*/ + private void printHelp(Terminal terminal) throws IOException { + terminal.println(description); + terminal.println(""); + printAdditionalHelp(terminal); + parser.printHelpOn(terminal.getWriter()); + } + + /** Prints additional help information, specific to the command */ + protected void printAdditionalHelp(Terminal terminal) {} + + @SuppressForbidden(reason = "Allowed to exit explicitly from #main()") + protected static void exit(int status) { + System.exit(status); + } + + /** + * Executes this command. + * + * Any runtime user errors (like an input file that does not exist) should throw a {@link UserError}. */ + protected abstract void execute(Terminal terminal, OptionSet options) throws Exception; +} diff --git a/core/src/main/java/org/elasticsearch/cli/ExitCodes.java b/core/src/main/java/org/elasticsearch/cli/ExitCodes.java new file mode 100644 index 00000000000..d08deb8b1ad --- /dev/null +++ b/core/src/main/java/org/elasticsearch/cli/ExitCodes.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cli; + +/** + * POSIX exit codes. + */ +public class ExitCodes { + public static final int OK = 0; + public static final int USAGE = 64; /* command line usage error */ + public static final int DATA_ERROR = 65; /* data format error */ + public static final int NO_INPUT = 66; /* cannot open input */ + public static final int NO_USER = 67; /* addressee unknown */ + public static final int NO_HOST = 68; /* host name unknown */ + public static final int UNAVAILABLE = 69; /* service unavailable */ + public static final int CODE_ERROR = 70; /* internal software error */ + public static final int CANT_CREATE = 73; /* can't create (user) output file */ + public static final int IO_ERROR = 74; /* input/output error */ + public static final int TEMP_FAILURE = 75; /* temp failure; user is invited to retry */ + public static final int PROTOCOL = 76; /* remote error in protocol */ + public static final int NOPERM = 77; /* permission denied */ + public static final int CONFIG = 78; /* configuration error */ + + private ExitCodes() { /* no instance, just constants */ } +} diff --git a/core/src/main/java/org/elasticsearch/cli/MultiCommand.java b/core/src/main/java/org/elasticsearch/cli/MultiCommand.java new file mode 100644 index 00000000000..a9feee0c9bf --- /dev/null +++ b/core/src/main/java/org/elasticsearch/cli/MultiCommand.java @@ -0,0 +1,71 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cli; + +import java.util.Arrays; +import java.util.LinkedHashMap; +import java.util.Map; + +import joptsimple.NonOptionArgumentSpec; +import joptsimple.OptionSet; + +/** + * A cli tool which is made up of multiple subcommands. + */ +public class MultiCommand extends Command { + + protected final Map subcommands = new LinkedHashMap<>(); + + private final NonOptionArgumentSpec arguments = parser.nonOptions("command"); + + public MultiCommand(String description) { + super(description); + parser.posixlyCorrect(true); + } + + @Override + protected void printAdditionalHelp(Terminal terminal) { + if (subcommands.isEmpty()) { + throw new IllegalStateException("No subcommands configured"); + } + terminal.println("Commands"); + terminal.println("--------"); + for (Map.Entry subcommand : subcommands.entrySet()) { + terminal.println(subcommand.getKey() + " - " + subcommand.getValue().description); + } + terminal.println(""); + } + + @Override + protected void execute(Terminal terminal, OptionSet options) throws Exception { + if (subcommands.isEmpty()) { + throw new IllegalStateException("No subcommands configured"); + } + String[] args = arguments.values(options).toArray(new String[0]); + if (args.length == 0) { + throw new UserError(ExitCodes.USAGE, "Missing command"); + } + Command subcommand = subcommands.get(args[0]); + if (subcommand == null) { + throw new UserError(ExitCodes.USAGE, "Unknown command [" + args[0] + "]"); + } + subcommand.mainWithoutErrorHandling(Arrays.copyOfRange(args, 1, args.length), terminal); + } +} diff --git a/core/src/main/java/org/elasticsearch/common/cli/Terminal.java b/core/src/main/java/org/elasticsearch/cli/Terminal.java similarity index 91% rename from core/src/main/java/org/elasticsearch/common/cli/Terminal.java rename to core/src/main/java/org/elasticsearch/cli/Terminal.java index fbef1f78cc3..d2dc57263dc 100644 --- a/core/src/main/java/org/elasticsearch/common/cli/Terminal.java +++ b/core/src/main/java/org/elasticsearch/cli/Terminal.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.common.cli; +package org.elasticsearch.cli; import java.io.BufferedReader; import java.io.Console; @@ -29,7 +29,7 @@ import java.nio.charset.Charset; import org.elasticsearch.common.SuppressForbidden; /** - * A Terminal wraps access to reading input and writing output for a {@link CliTool}. + * A Terminal wraps access to reading input and writing output for a cli. * * The available methods are similar to those of {@link Console}, with the ability * to read either normal text or a password, and the ability to print a line @@ -61,7 +61,7 @@ public abstract class Terminal { } /** Sets the verbosity of the terminal. 
*/ - void setVerbosity(Verbosity verbosity) { + public void setVerbosity(Verbosity verbosity) { this.verbosity = verbosity; } @@ -89,35 +89,35 @@ private static class ConsoleTerminal extends Terminal { - private static final Console console = System.console(); + private static final Console CONSOLE = System.console(); ConsoleTerminal() { super(System.lineSeparator()); } static boolean isSupported() { - return console != null; + return CONSOLE != null; } @Override public PrintWriter getWriter() { - return console.writer(); + return CONSOLE.writer(); } @Override public String readText(String prompt) { - return console.readLine("%s", prompt); + return CONSOLE.readLine("%s", prompt); } @Override public char[] readSecret(String prompt) { - return console.readPassword("%s", prompt); + return CONSOLE.readPassword("%s", prompt); } } private static class SystemTerminal extends Terminal { - private final PrintWriter writer = newWriter(); + private static final PrintWriter WRITER = newWriter(); SystemTerminal() { super(System.lineSeparator()); @@ -130,7 +130,7 @@ @Override public PrintWriter getWriter() { - return writer; + return WRITER; } @Override diff --git a/core/src/main/java/org/elasticsearch/common/cli/UserError.java b/core/src/main/java/org/elasticsearch/cli/UserError.java similarity index 79% rename from core/src/main/java/org/elasticsearch/common/cli/UserError.java rename to core/src/main/java/org/elasticsearch/cli/UserError.java index ad709830885..2a4f2bf1233 100644 --- a/core/src/main/java/org/elasticsearch/common/cli/UserError.java +++ b/core/src/main/java/org/elasticsearch/cli/UserError.java @@ -17,19 +17,19 @@ * under the License. */ -package org.elasticsearch.common.cli; +package org.elasticsearch.cli; /** - * An exception representing a user fixable problem in {@link CliTool} usage. + * An exception representing a user fixable problem in {@link Command} usage. */ public class UserError extends Exception { /** The exit status the cli should use when catching this user error. */ - public final CliTool.ExitStatus exitStatus; + public final int exitCode; /** Constructs a UserError with an exit status and message to show the user. 
*/ - public UserError(CliTool.ExitStatus exitStatus, String msg) { + public UserError(int exitCode, String msg) { super(msg); - this.exitStatus = exitStatus; + this.exitCode = exitCode; } } diff --git a/core/src/main/java/org/elasticsearch/client/Client.java b/core/src/main/java/org/elasticsearch/client/Client.java index f81ba9eb1b1..e5d8d4f55b7 100644 --- a/core/src/main/java/org/elasticsearch/client/Client.java +++ b/core/src/main/java/org/elasticsearch/client/Client.java @@ -19,12 +19,8 @@ package org.elasticsearch.client; -import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestBuilder; -import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; @@ -87,6 +83,7 @@ import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import java.util.Map; @@ -114,7 +111,7 @@ public interface Client extends ElasticsearchClient, Releasable { default: throw new IllegalArgumentException("Can't parse [client.type] must be one of [node, transport]"); } - }, false, Setting.Scope.CLUSTER); + }, Property.NodeScope); /** * The admin client that can be used to perform administrative operations. diff --git a/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java b/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java index 1e605b9de06..abbc5823b2a 100644 --- a/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java +++ b/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java @@ -34,6 +34,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; @@ -100,10 +101,14 @@ public class TransportClientNodesService extends AbstractComponent { private volatile boolean closed; - public static final Setting CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL = Setting.positiveTimeSetting("client.transport.nodes_sampler_interval", timeValueSeconds(5), false, Setting.Scope.CLUSTER); - public static final Setting CLIENT_TRANSPORT_PING_TIMEOUT = Setting.positiveTimeSetting("client.transport.ping_timeout", timeValueSeconds(5), false, Setting.Scope.CLUSTER); - public static final Setting CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME = Setting.boolSetting("client.transport.ignore_cluster_name", false, false, Setting.Scope.CLUSTER); - public static final Setting CLIENT_TRANSPORT_SNIFF = Setting.boolSetting("client.transport.sniff", false, false, Setting.Scope.CLUSTER); + public static final Setting CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL = + Setting.positiveTimeSetting("client.transport.nodes_sampler_interval", timeValueSeconds(5), Property.NodeScope); + public static final Setting 
CLIENT_TRANSPORT_PING_TIMEOUT = + Setting.positiveTimeSetting("client.transport.ping_timeout", timeValueSeconds(5), Property.NodeScope); + public static final Setting CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME = + Setting.boolSetting("client.transport.ignore_cluster_name", false, Property.NodeScope); + public static final Setting CLIENT_TRANSPORT_SNIFF = + Setting.boolSetting("client.transport.sniff", false, Property.NodeScope); @Inject public TransportClientNodesService(Settings settings, ClusterName clusterName, TransportService transportService, @@ -119,7 +124,7 @@ public class TransportClientNodesService extends AbstractComponent { this.ignoreClusterName = CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME.get(this.settings); if (logger.isDebugEnabled()) { - logger.debug("node_sampler_interval[" + nodesSamplerInterval + "]"); + logger.debug("node_sampler_interval[{}]", nodesSamplerInterval); } if (CLIENT_TRANSPORT_SNIFF.get(this.settings)) { @@ -318,7 +323,7 @@ public class TransportClientNodesService extends AbstractComponent { transportService.connectToNode(node); } catch (Throwable e) { it.remove(); - logger.debug("failed to connect to discovered node [" + node + "]", e); + logger.debug("failed to connect to discovered node [{}]", e, node); } } } diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java index 6d9273b2661..42290e71779 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -58,6 +58,7 @@ import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.ExtensionPoint; import org.elasticsearch.gateway.GatewayAllocator; @@ -74,7 +75,8 @@ public class ClusterModule extends AbstractModule { public static final String EVEN_SHARD_COUNT_ALLOCATOR = "even_shard"; public static final String BALANCED_ALLOCATOR = "balanced"; // default - public static final Setting SHARDS_ALLOCATOR_TYPE_SETTING = new Setting<>("cluster.routing.allocation.type", BALANCED_ALLOCATOR, Function.identity(), false, Setting.Scope.CLUSTER); + public static final Setting SHARDS_ALLOCATOR_TYPE_SETTING = + new Setting<>("cluster.routing.allocation.type", BALANCED_ALLOCATOR, Function.identity(), Property.NodeScope); public static final List> DEFAULT_ALLOCATION_DECIDERS = Collections.unmodifiableList(Arrays.asList( SameShardAllocationDecider.class, diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterName.java b/core/src/main/java/org/elasticsearch/cluster/ClusterName.java index daf3000d710..09c64065dbd 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterName.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterName.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import java.io.IOException; @@ -37,7 +38,7 @@ public class ClusterName implements Streamable { throw new IllegalArgumentException("[cluster.name] must not be empty"); } return s; - 
}, false, Setting.Scope.CLUSTER); + }, Property.NodeScope); public static final ClusterName DEFAULT = new ClusterName(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY).intern()); diff --git a/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java index 5107b4495ab..896793f1bf3 100644 --- a/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java +++ b/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java @@ -39,6 +39,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; @@ -64,8 +65,12 @@ import java.util.concurrent.TimeUnit; */ public class InternalClusterInfoService extends AbstractComponent implements ClusterInfoService, LocalNodeMasterListener, ClusterStateListener { - public static final Setting<TimeValue> INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING = Setting.timeSetting("cluster.info.update.interval", TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(10), true, Setting.Scope.CLUSTER); - public static final Setting<TimeValue> INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING = Setting.positiveTimeSetting("cluster.info.update.timeout", TimeValue.timeValueSeconds(15), true, Setting.Scope.CLUSTER); + public static final Setting<TimeValue> INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING = + Setting.timeSetting("cluster.info.update.interval", TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(10), + Property.Dynamic, Property.NodeScope); + public static final Setting<TimeValue> INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING = + Setting.positiveTimeSetting("cluster.info.update.timeout", TimeValue.timeValueSeconds(15), + Property.Dynamic, Property.NodeScope); private volatile TimeValue updateFrequency; diff --git a/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java b/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java index cce25652ed7..698f9d1090c 100644 --- a/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java +++ b/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java @@ -35,6 +35,10 @@ import org.elasticsearch.transport.TransportService; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ScheduledFuture; +import static org.elasticsearch.common.settings.Setting.Property; +import static org.elasticsearch.common.settings.Setting.positiveTimeSetting; + + /** * This component is responsible for connecting to nodes once they are added to the cluster state, and disconnect when they are * removed. Also, it periodically checks that all connections are still open and if needed restores them.
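Two patterns recur through these hunks. First, the Setting API migration: the old factory methods took a trailing boolean "dynamic" flag plus a Setting.Scope enum, while the new ones take Setting.Property varargs, so a setting's scope and dynamic-ness travel together as one argument list. A minimal before/after sketch, using a hypothetical my_plugin.ping_interval setting rather than one from this change set, and assuming the same imports the hunks add (Setting.Property and the static timeValueSeconds helper):

    // Old API: dynamic flag (false) and scope passed as separate trailing arguments.
    // public static final Setting<TimeValue> PING_INTERVAL_SETTING = Setting.positiveTimeSetting(
    //         "my_plugin.ping_interval", timeValueSeconds(5), false, Setting.Scope.CLUSTER);

    // New API: properties as varargs; settings updatable at runtime add Property.Dynamic.
    public static final Setting<TimeValue> PING_INTERVAL_SETTING = Setting.positiveTimeSetting(
            "my_plugin.ping_interval", timeValueSeconds(5), Property.NodeScope);

    // Reading a value is unchanged by the migration:
    TimeValue interval = PING_INTERVAL_SETTING.get(settings);

Second, the logging cleanups replace string concatenation with ESLogger's parameterized form, in which a Throwable, when present, is passed before the format arguments (this ordering is specific to Elasticsearch's ESLogger wrapper, as the TransportClientNodesService hunk above shows):

    logger.debug("failed to connect to discovered node [{}]", e, node); // throwable first, then the {} arguments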
@@ -45,7 +49,7 @@ import java.util.concurrent.ScheduledFuture; public class NodeConnectionsService extends AbstractLifecycleComponent<NodeConnectionsService> { public static final Setting<TimeValue> CLUSTER_NODE_RECONNECT_INTERVAL_SETTING = - Setting.positiveTimeSetting("cluster.nodes.reconnect_interval", TimeValue.timeValueSeconds(10), false, Setting.Scope.CLUSTER); + positiveTimeSetting("cluster.nodes.reconnect_interval", TimeValue.timeValueSeconds(10), Property.NodeScope); private final ThreadPool threadPool; private final TransportService transportService; diff --git a/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java b/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java index 0e61712b010..d483d56d84c 100644 --- a/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java +++ b/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.mapper.MapperService; @@ -41,7 +42,9 @@ import java.util.concurrent.TimeoutException; */ public class MappingUpdatedAction extends AbstractComponent { - public static final Setting<TimeValue> INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING = Setting.positiveTimeSetting("indices.mapping.dynamic_timeout", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER); + public static final Setting<TimeValue> INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING = + Setting.positiveTimeSetting("indices.mapping.dynamic_timeout", TimeValue.timeValueSeconds(30), + Property.Dynamic, Property.NodeScope); private IndicesAdminClient client; private volatile TimeValue dynamicMappingUpdateTimeout; diff --git a/core/src/main/java/org/elasticsearch/cluster/action/index/NodeIndexDeletedAction.java b/core/src/main/java/org/elasticsearch/cluster/action/index/NodeIndexDeletedAction.java index 012cc66e110..c2c1b468f1b 100644 --- a/core/src/main/java/org/elasticsearch/cluster/action/index/NodeIndexDeletedAction.java +++ b/core/src/main/java/org/elasticsearch/cluster/action/index/NodeIndexDeletedAction.java @@ -81,13 +81,13 @@ public class NodeIndexDeletedAction extends AbstractComponent { transportService.sendRequest(clusterState.nodes().masterNode(), INDEX_DELETED_ACTION_NAME, new NodeIndexDeletedMessage(index, nodeId), EmptyTransportResponseHandler.INSTANCE_SAME); if (nodes.localNode().isDataNode() == false) { - logger.trace("[{}] not acking store deletion (not a data node)"); + logger.trace("[{}] not acking store deletion (not a data node)", index); return; } threadPool.generic().execute(new AbstractRunnable() { @Override public void onFailure(Throwable t) { - logger.warn("[{}]failed to ack index store deleted for index", t, index); + logger.warn("[{}] failed to ack index store deleted for index", t, index); } @Override diff --git a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index c7f39015c18..3baf91a9dc8 100644 --- a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -151,7 +151,7 @@
public class ShardStateAction extends AbstractComponent { @Override public void onNewClusterState(ClusterState state) { if (logger.isTraceEnabled()) { - logger.trace("new cluster state [{}] after waiting for master election to fail shard [{}]", shardRoutingEntry.getShardRouting().shardId(), state.prettyPrint(), shardRoutingEntry); + logger.trace("new cluster state [{}] after waiting for master election to fail shard [{}]", state.prettyPrint(), shardRoutingEntry); } sendShardAction(actionName, observer, shardRoutingEntry, listener); } @@ -321,7 +321,7 @@ public class ShardStateAction extends AbstractComponent { if (numberOfUnassignedShards > 0) { String reason = String.format(Locale.ROOT, "[%d] unassigned shards after failing shards", numberOfUnassignedShards); if (logger.isTraceEnabled()) { - logger.trace(reason + ", scheduling a reroute"); + logger.trace("{}, scheduling a reroute", reason); } routingService.reroute(reason); } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java b/core/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java index d9b288bb897..4b4a8e54d7c 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java @@ -20,6 +20,7 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; /** * This class acts as a functional wrapper around the index.auto_expand_replicas setting. @@ -56,7 +57,7 @@ final class AutoExpandReplicas { } } return new AutoExpandReplicas(min, max, true); - }, true, Setting.Scope.INDEX); + }, Property.Dynamic, Property.IndexScope); private final int minReplicas; private final int maxReplicas; diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index 7b4d5a68cec..8c093a72ff3 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -38,6 +38,7 @@ import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.loader.SettingsLoader; import org.elasticsearch.common.xcontent.FromXContentBuilder; @@ -152,28 +153,36 @@ public class IndexMetaData implements Diffable, FromXContentBuild public static final String INDEX_SETTING_PREFIX = "index."; public static final String SETTING_NUMBER_OF_SHARDS = "index.number_of_shards"; - public static final Setting INDEX_NUMBER_OF_SHARDS_SETTING = Setting.intSetting(SETTING_NUMBER_OF_SHARDS, 5, 1, false, Setting.Scope.INDEX); + public static final Setting INDEX_NUMBER_OF_SHARDS_SETTING = + Setting.intSetting(SETTING_NUMBER_OF_SHARDS, 5, 1, Property.IndexScope); public static final String SETTING_NUMBER_OF_REPLICAS = "index.number_of_replicas"; - public static final Setting INDEX_NUMBER_OF_REPLICAS_SETTING = Setting.intSetting(SETTING_NUMBER_OF_REPLICAS, 1, 0, true, Setting.Scope.INDEX); + public static final Setting INDEX_NUMBER_OF_REPLICAS_SETTING = + Setting.intSetting(SETTING_NUMBER_OF_REPLICAS, 
1, 0, Property.Dynamic, Property.IndexScope); public static final String SETTING_SHADOW_REPLICAS = "index.shadow_replicas"; - public static final Setting INDEX_SHADOW_REPLICAS_SETTING = Setting.boolSetting(SETTING_SHADOW_REPLICAS, false, false, Setting.Scope.INDEX); + public static final Setting INDEX_SHADOW_REPLICAS_SETTING = + Setting.boolSetting(SETTING_SHADOW_REPLICAS, false, Property.IndexScope); public static final String SETTING_SHARED_FILESYSTEM = "index.shared_filesystem"; - public static final Setting INDEX_SHARED_FILESYSTEM_SETTING = Setting.boolSetting(SETTING_SHARED_FILESYSTEM, false, false, Setting.Scope.INDEX); + public static final Setting INDEX_SHARED_FILESYSTEM_SETTING = + Setting.boolSetting(SETTING_SHARED_FILESYSTEM, false, Property.IndexScope); public static final String SETTING_AUTO_EXPAND_REPLICAS = "index.auto_expand_replicas"; public static final Setting INDEX_AUTO_EXPAND_REPLICAS_SETTING = AutoExpandReplicas.SETTING; public static final String SETTING_READ_ONLY = "index.blocks.read_only"; - public static final Setting INDEX_READ_ONLY_SETTING = Setting.boolSetting(SETTING_READ_ONLY, false, true, Setting.Scope.INDEX); + public static final Setting INDEX_READ_ONLY_SETTING = + Setting.boolSetting(SETTING_READ_ONLY, false, Property.Dynamic, Property.IndexScope); public static final String SETTING_BLOCKS_READ = "index.blocks.read"; - public static final Setting INDEX_BLOCKS_READ_SETTING = Setting.boolSetting(SETTING_BLOCKS_READ, false, true, Setting.Scope.INDEX); + public static final Setting INDEX_BLOCKS_READ_SETTING = + Setting.boolSetting(SETTING_BLOCKS_READ, false, Property.Dynamic, Property.IndexScope); public static final String SETTING_BLOCKS_WRITE = "index.blocks.write"; - public static final Setting INDEX_BLOCKS_WRITE_SETTING = Setting.boolSetting(SETTING_BLOCKS_WRITE, false, true, Setting.Scope.INDEX); + public static final Setting INDEX_BLOCKS_WRITE_SETTING = + Setting.boolSetting(SETTING_BLOCKS_WRITE, false, Property.Dynamic, Property.IndexScope); public static final String SETTING_BLOCKS_METADATA = "index.blocks.metadata"; - public static final Setting INDEX_BLOCKS_METADATA_SETTING = Setting.boolSetting(SETTING_BLOCKS_METADATA, false, true, Setting.Scope.INDEX); + public static final Setting INDEX_BLOCKS_METADATA_SETTING = + Setting.boolSetting(SETTING_BLOCKS_METADATA, false, Property.Dynamic, Property.IndexScope); public static final String SETTING_VERSION_CREATED = "index.version.created"; public static final String SETTING_VERSION_CREATED_STRING = "index.version.created_string"; @@ -182,18 +191,24 @@ public class IndexMetaData implements Diffable, FromXContentBuild public static final String SETTING_VERSION_MINIMUM_COMPATIBLE = "index.version.minimum_compatible"; public static final String SETTING_CREATION_DATE = "index.creation_date"; public static final String SETTING_PRIORITY = "index.priority"; - public static final Setting INDEX_PRIORITY_SETTING = Setting.intSetting("index.priority", 1, 0, true, Setting.Scope.INDEX); + public static final Setting INDEX_PRIORITY_SETTING = + Setting.intSetting("index.priority", 1, 0, Property.Dynamic, Property.IndexScope); public static final String SETTING_CREATION_DATE_STRING = "index.creation_date_string"; public static final String SETTING_INDEX_UUID = "index.uuid"; public static final String SETTING_DATA_PATH = "index.data_path"; - public static final Setting INDEX_DATA_PATH_SETTING = new Setting<>(SETTING_DATA_PATH, "", Function.identity(), false, Setting.Scope.INDEX); + public static final Setting 
INDEX_DATA_PATH_SETTING = + new Setting<>(SETTING_DATA_PATH, "", Function.identity(), Property.IndexScope); public static final String SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE = "index.shared_filesystem.recover_on_any_node"; - public static final Setting INDEX_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE_SETTING = Setting.boolSetting(SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false, true, Setting.Scope.INDEX); + public static final Setting INDEX_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE_SETTING = + Setting.boolSetting(SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false, Property.Dynamic, Property.IndexScope); public static final String INDEX_UUID_NA_VALUE = "_na_"; - public static final Setting INDEX_ROUTING_REQUIRE_GROUP_SETTING = Setting.groupSetting("index.routing.allocation.require.", true, Setting.Scope.INDEX); - public static final Setting INDEX_ROUTING_INCLUDE_GROUP_SETTING = Setting.groupSetting("index.routing.allocation.include.", true, Setting.Scope.INDEX); - public static final Setting INDEX_ROUTING_EXCLUDE_GROUP_SETTING = Setting.groupSetting("index.routing.allocation.exclude.", true, Setting.Scope.INDEX); + public static final Setting INDEX_ROUTING_REQUIRE_GROUP_SETTING = + Setting.groupSetting("index.routing.allocation.require.", Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_ROUTING_INCLUDE_GROUP_SETTING = + Setting.groupSetting("index.routing.allocation.include.", Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_ROUTING_EXCLUDE_GROUP_SETTING = + Setting.groupSetting("index.routing.allocation.exclude.", Property.Dynamic, Property.IndexScope); public static final IndexMetaData PROTO = IndexMetaData.builder("") .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index d7dddb15984..4c83f64581e 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -41,6 +41,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.loader.SettingsLoader; import org.elasticsearch.common.xcontent.FromXContentBuilder; @@ -139,7 +140,8 @@ public class MetaData implements Iterable, Diffable, Fr } - public static final Setting SETTING_READ_ONLY_SETTING = Setting.boolSetting("cluster.blocks.read_only", false, true, Setting.Scope.CLUSTER); + public static final Setting SETTING_READ_ONLY_SETTING = + Setting.boolSetting("cluster.blocks.read_only", false, Property.Dynamic, Property.NodeScope); public static final ClusterBlock CLUSTER_READ_ONLY_BLOCK = new ClusterBlock(6, "cluster read-only (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); diff --git a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeService.java b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeService.java index 47c0e0052d3..ccd30a99e9c 100644 --- a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeService.java +++ b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeService.java @@ -25,6 
+25,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; @@ -40,7 +41,7 @@ public class DiscoveryNodeService extends AbstractComponent { public static final Setting NODE_ID_SEED_SETTING = // don't use node.id.seed so it won't be seen as an attribute - Setting.longSetting("node_id.seed", 0L, Long.MIN_VALUE, false, Setting.Scope.CLUSTER); + Setting.longSetting("node_id.seed", 0L, Long.MIN_VALUE, Property.NodeScope); private final List customAttributesProviders = new CopyOnWriteArrayList<>(); private final Version version; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java b/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java index 714c1e4913a..be7d90a1fef 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.joda.FormatDateTimeFormatter; import org.elasticsearch.common.joda.Joda; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContent; @@ -44,7 +45,9 @@ public class UnassignedInfo implements ToXContent, Writeable { public static final FormatDateTimeFormatter DATE_TIME_FORMATTER = Joda.forPattern("dateOptionalTime"); private static final TimeValue DEFAULT_DELAYED_NODE_LEFT_TIMEOUT = TimeValue.timeValueMinutes(1); - public static final Setting INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING = Setting.timeSetting("index.unassigned.node_left.delayed_timeout", DEFAULT_DELAYED_NODE_LEFT_TIMEOUT, true, Setting.Scope.INDEX); + public static final Setting INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING = + Setting.timeSetting("index.unassigned.node_left.delayed_timeout", DEFAULT_DELAYED_NODE_LEFT_TIMEOUT, Property.Dynamic, + Property.IndexScope); /** * Reason why the shard is in unassigned state. 
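The UnassignedInfo hunk above shows the index-level variant of the same migration: Setting.Scope.INDEX becomes Property.IndexScope, combined with Property.Dynamic when the setting may be updated on a live index. A sketch under those assumptions, with a hypothetical setting name; resolving the value through an index's own Settings object is how index-scoped settings are read elsewhere in this codebase:

    // Hypothetical per-index, dynamically updatable timeout.
    public static final Setting<TimeValue> INDEX_EXAMPLE_TIMEOUT_SETTING =
        Setting.timeSetting("index.example.timeout", TimeValue.timeValueSeconds(30),
            Property.Dynamic, Property.IndexScope);

    // Resolve against a concrete index's settings; falls back to the declared default.
    TimeValue timeout = INDEX_EXAMPLE_TIMEOUT_SETTING.get(indexMetaData.getSettings());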
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 6377e06e245..26b8b224d78 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -36,6 +36,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.gateway.PriorityComparator; @@ -67,9 +68,13 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; */ public class BalancedShardsAllocator extends AbstractComponent implements ShardsAllocator { - public static final Setting INDEX_BALANCE_FACTOR_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.index", 0.55f, true, Setting.Scope.CLUSTER); - public static final Setting SHARD_BALANCE_FACTOR_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.shard", 0.45f, true, Setting.Scope.CLUSTER); - public static final Setting THRESHOLD_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.threshold", 1.0f, 0.0f, true, Setting.Scope.CLUSTER); + public static final Setting INDEX_BALANCE_FACTOR_SETTING = + Setting.floatSetting("cluster.routing.allocation.balance.index", 0.55f, Property.Dynamic, Property.NodeScope); + public static final Setting SHARD_BALANCE_FACTOR_SETTING = + Setting.floatSetting("cluster.routing.allocation.balance.shard", 0.45f, Property.Dynamic, Property.NodeScope); + public static final Setting THRESHOLD_SETTING = + Setting.floatSetting("cluster.routing.allocation.balance.threshold", 1.0f, 0.0f, + Property.Dynamic, Property.NodeScope); private volatile WeightFunction weightFunction; private volatile float threshold; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java index 9859a9b6584..77613f39084 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import java.util.HashMap; @@ -77,8 +78,11 @@ public class AwarenessAllocationDecider extends AllocationDecider { public static final String NAME = "awareness"; - public static final Setting CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING = new Setting<>("cluster.routing.allocation.awareness.attributes", "", Strings::splitStringByCommaToArray , true, Setting.Scope.CLUSTER); - public static final Setting CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.awareness.force.", true, Setting.Scope.CLUSTER); + public static final Setting 
CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING = + new Setting<>("cluster.routing.allocation.awareness.attributes", "", Strings::splitStringByCommaToArray , Property.Dynamic, + Property.NodeScope); + public static final Setting CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING = + Setting.groupSetting("cluster.routing.allocation.awareness.force.", Property.Dynamic, Property.NodeScope); private String[] awarenessAttributes; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java index 11fce397b26..84e974aceb0 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java @@ -24,6 +24,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import java.util.Locale; @@ -48,7 +49,9 @@ import java.util.Locale; public class ClusterRebalanceAllocationDecider extends AllocationDecider { public static final String NAME = "cluster_rebalance"; - public static final Setting CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING = new Setting<>("cluster.routing.allocation.allow_rebalance", ClusterRebalanceType.INDICES_ALL_ACTIVE.name().toLowerCase(Locale.ROOT), ClusterRebalanceType::parseString, true, Setting.Scope.CLUSTER); + public static final Setting CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING = + new Setting<>("cluster.routing.allocation.allow_rebalance", ClusterRebalanceType.INDICES_ALL_ACTIVE.name().toLowerCase(Locale.ROOT), + ClusterRebalanceType::parseString, Property.Dynamic, Property.NodeScope); /** * An enum representation for the configured re-balance type. 
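The allow_rebalance setting above is the fully generic form of the new API: new Setting<>(key, default-as-string, parser, properties...), where the parser converts the raw string into the setting's value type. The same shape fits any enum-valued setting; a hypothetical sketch (Mode and the key are illustrative, not part of this change set):

    public enum Mode {
        SYNC, ASYNC;

        public static Mode parseString(String value) {
            // Mirrors the ClusterRebalanceType::parseString idea: case-insensitive enum lookup.
            return Mode.valueOf(value.toUpperCase(java.util.Locale.ROOT));
        }
    }

    public static final Setting<Mode> EXAMPLE_MODE_SETTING =
        new Setting<>("cluster.example.mode", Mode.SYNC.name().toLowerCase(java.util.Locale.ROOT),
            Mode::parseString, Property.Dynamic, Property.NodeScope);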
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java index d39b9604066..fe6bf918dc2 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java @@ -24,6 +24,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; /** @@ -42,7 +43,9 @@ public class ConcurrentRebalanceAllocationDecider extends AllocationDecider { public static final String NAME = "concurrent_rebalance"; - public static final Setting CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING = Setting.intSetting("cluster.routing.allocation.cluster_concurrent_rebalance", 2, -1, true, Setting.Scope.CLUSTER); + public static final Setting CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING = + Setting.intSetting("cluster.routing.allocation.cluster_concurrent_rebalance", 2, -1, + Property.Dynamic, Property.NodeScope); private volatile int clusterConcurrentRebalance; @Inject diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index 821fa55d704..dcb6080bd1e 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -38,6 +38,7 @@ import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.RatioValue; @@ -81,11 +82,22 @@ public class DiskThresholdDecider extends AllocationDecider { private volatile boolean enabled; private volatile TimeValue rerouteInterval; - public static final Setting CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING = Setting.boolSetting("cluster.routing.allocation.disk.threshold_enabled", true, true, Setting.Scope.CLUSTER); - public static final Setting CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING = new Setting<>("cluster.routing.allocation.disk.watermark.low", "85%", (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.low"), true, Setting.Scope.CLUSTER); - public static final Setting CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING = new Setting<>("cluster.routing.allocation.disk.watermark.high", "90%", (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.high"), true, Setting.Scope.CLUSTER); - public static final Setting CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING = Setting.boolSetting("cluster.routing.allocation.disk.include_relocations", true, true, Setting.Scope.CLUSTER);; - public static final Setting 
CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING = Setting.positiveTimeSetting("cluster.routing.allocation.disk.reroute_interval", TimeValue.timeValueSeconds(60), true, Setting.Scope.CLUSTER); + public static final Setting CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING = + Setting.boolSetting("cluster.routing.allocation.disk.threshold_enabled", true, Property.Dynamic, Property.NodeScope); + public static final Setting CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING = + new Setting<>("cluster.routing.allocation.disk.watermark.low", "85%", + (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.low"), + Property.Dynamic, Property.NodeScope); + public static final Setting CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING = + new Setting<>("cluster.routing.allocation.disk.watermark.high", "90%", + (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.high"), + Property.Dynamic, Property.NodeScope); + public static final Setting CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING = + Setting.boolSetting("cluster.routing.allocation.disk.include_relocations", true, + Property.Dynamic, Property.NodeScope);; + public static final Setting CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING = + Setting.positiveTimeSetting("cluster.routing.allocation.disk.reroute_interval", TimeValue.timeValueSeconds(60), + Property.Dynamic, Property.NodeScope); /** * Listens for a node to go over the high watermark and kicks off an empty diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java index 9131355876b..80dada86022 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import java.util.Locale; @@ -60,11 +61,19 @@ public class EnableAllocationDecider extends AllocationDecider { public static final String NAME = "enable"; - public static final Setting CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING = new Setting<>("cluster.routing.allocation.enable", Allocation.ALL.name(), Allocation::parse, true, Setting.Scope.CLUSTER); - public static final Setting INDEX_ROUTING_ALLOCATION_ENABLE_SETTING = new Setting<>("index.routing.allocation.enable", Allocation.ALL.name(), Allocation::parse, true, Setting.Scope.INDEX); + public static final Setting CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING = + new Setting<>("cluster.routing.allocation.enable", Allocation.ALL.name(), Allocation::parse, + Property.Dynamic, Property.NodeScope); + public static final Setting INDEX_ROUTING_ALLOCATION_ENABLE_SETTING = + new Setting<>("index.routing.allocation.enable", Allocation.ALL.name(), Allocation::parse, + Property.Dynamic, Property.IndexScope); - public static final Setting CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING = new Setting<>("cluster.routing.rebalance.enable", Rebalance.ALL.name(), Rebalance::parse, true, Setting.Scope.CLUSTER); - public static final Setting INDEX_ROUTING_REBALANCE_ENABLE_SETTING = new 
Setting<>("index.routing.rebalance.enable", Rebalance.ALL.name(), Rebalance::parse, true, Setting.Scope.INDEX); + public static final Setting CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING = + new Setting<>("cluster.routing.rebalance.enable", Rebalance.ALL.name(), Rebalance::parse, + Property.Dynamic, Property.NodeScope); + public static final Setting INDEX_ROUTING_REBALANCE_ENABLE_SETTING = + new Setting<>("index.routing.rebalance.enable", Rebalance.ALL.name(), Rebalance::parse, + Property.Dynamic, Property.IndexScope); private volatile Rebalance enableRebalance; private volatile Allocation enableAllocation; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java index f8ff5f37aed..c3ff0bb355e 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java @@ -27,6 +27,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.AND; @@ -60,9 +61,12 @@ public class FilterAllocationDecider extends AllocationDecider { public static final String NAME = "filter"; - public static final Setting CLUSTER_ROUTING_REQUIRE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.require.", true, Setting.Scope.CLUSTER); - public static final Setting CLUSTER_ROUTING_INCLUDE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.include.", true, Setting.Scope.CLUSTER); - public static final Setting CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.exclude.", true, Setting.Scope.CLUSTER); + public static final Setting CLUSTER_ROUTING_REQUIRE_GROUP_SETTING = + Setting.groupSetting("cluster.routing.allocation.require.", Property.Dynamic, Property.NodeScope); + public static final Setting CLUSTER_ROUTING_INCLUDE_GROUP_SETTING = + Setting.groupSetting("cluster.routing.allocation.include.", Property.Dynamic, Property.NodeScope); + public static final Setting CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING = + Setting.groupSetting("cluster.routing.allocation.exclude.", Property.Dynamic, Property.NodeScope); private volatile DiscoveryNodeFilters clusterRequireFilters; private volatile DiscoveryNodeFilters clusterIncludeFilters; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java index e766b4c49aa..ab8be4dc8da 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java @@ -27,6 +27,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; 
import org.elasticsearch.common.settings.Settings; /** @@ -59,13 +60,17 @@ public class ShardsLimitAllocationDecider extends AllocationDecider { * Controls the maximum number of shards per index on a single Elasticsearch * node. Negative values are interpreted as unlimited. */ - public static final Setting INDEX_TOTAL_SHARDS_PER_NODE_SETTING = Setting.intSetting("index.routing.allocation.total_shards_per_node", -1, -1, true, Setting.Scope.INDEX); + public static final Setting INDEX_TOTAL_SHARDS_PER_NODE_SETTING = + Setting.intSetting("index.routing.allocation.total_shards_per_node", -1, -1, + Property.Dynamic, Property.IndexScope); /** * Controls the maximum number of shards per node on a global level. * Negative values are interpreted as unlimited. */ - public static final Setting CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING = Setting.intSetting("cluster.routing.allocation.total_shards_per_node", -1, -1, true, Setting.Scope.CLUSTER); + public static final Setting CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING = + Setting.intSetting("cluster.routing.allocation.total_shards_per_node", -1, -1, + Property.Dynamic, Property.NodeScope); @Inject diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java index cf889cde6ad..d656afc8036 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; /** @@ -39,7 +40,9 @@ public class SnapshotInProgressAllocationDecider extends AllocationDecider { /** * Disables relocation of shards that are currently being snapshotted. 
*/ - public static final Setting CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING = Setting.boolSetting("cluster.routing.allocation.snapshot.relocation_enabled", false, true, Setting.Scope.CLUSTER); + public static final Setting CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING = + Setting.boolSetting("cluster.routing.allocation.snapshot.relocation_enabled", false, + Property.Dynamic, Property.NodeScope); private volatile boolean enableRelocation = false; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java index 25f43f57610..ca6b312da4c 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java @@ -25,6 +25,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; /** @@ -50,10 +51,25 @@ public class ThrottlingAllocationDecider extends AllocationDecider { public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES = 2; public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES = 4; public static final String NAME = "throttling"; - public static final Setting CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING = new Setting<>("cluster.routing.allocation.node_concurrent_recoveries", Integer.toString(DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES), (s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_recoveries"), true, Setting.Scope.CLUSTER); - public static final Setting CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING = Setting.intSetting("cluster.routing.allocation.node_initial_primaries_recoveries", DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, 0, true, Setting.Scope.CLUSTER); - public static final Setting CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING = new Setting<>("cluster.routing.allocation.node_concurrent_incoming_recoveries", (s) -> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getRaw(s), (s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_incoming_recoveries"), true, Setting.Scope.CLUSTER); - public static final Setting CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING = new Setting<>("cluster.routing.allocation.node_concurrent_outgoing_recoveries", (s) -> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getRaw(s), (s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_outgoing_recoveries"), true, Setting.Scope.CLUSTER); + public static final Setting CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING = + new Setting<>("cluster.routing.allocation.node_concurrent_recoveries", + Integer.toString(DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES), + (s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_recoveries"), + Property.Dynamic, Property.NodeScope); + public static final Setting 
CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING = + Setting.intSetting("cluster.routing.allocation.node_initial_primaries_recoveries", + DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, 0, + Property.Dynamic, Property.NodeScope); + public static final Setting CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING = + new Setting<>("cluster.routing.allocation.node_concurrent_incoming_recoveries", + (s) -> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getRaw(s), + (s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_incoming_recoveries"), + Property.Dynamic, Property.NodeScope); + public static final Setting CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING = + new Setting<>("cluster.routing.allocation.node_concurrent_outgoing_recoveries", + (s) -> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getRaw(s), + (s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_outgoing_recoveries"), + Property.Dynamic, Property.NodeScope); private volatile int primariesInitialRecoveries; diff --git a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java index 7cd3d840fbc..525a9a9af40 100644 --- a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java +++ b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java @@ -50,6 +50,7 @@ import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.unit.TimeValue; @@ -89,7 +90,9 @@ import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadF */ public class InternalClusterService extends AbstractLifecycleComponent implements ClusterService { - public static final Setting CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING = Setting.positiveTimeSetting("cluster.service.slow_task_logging_threshold", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER); + public static final Setting CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING = + Setting.positiveTimeSetting("cluster.service.slow_task_logging_threshold", TimeValue.timeValueSeconds(30), + Property.Dynamic, Property.NodeScope); public static final String UPDATE_THREAD_NAME = "clusterService#updateTask"; private final ThreadPool threadPool; @@ -436,11 +439,8 @@ public class InternalClusterService extends AbstractLifecycleComponentbuilder().failures(toExecute.stream().map(updateTask -> updateTask.task)::iterator, e).build(previousClusterState); @@ -523,9 +523,7 @@ public class InternalClusterService extends AbstractLifecycleComponent> permissions = new HashMap<>(paths.length); - Map owners = new HashMap<>(paths.length); - Map groups = new HashMap<>(paths.length); - - if (paths != null && paths.length > 0) { - for (Path path : paths) { - try { - boolean supportsPosixPermissions = Environment.getFileStore(path).supportsFileAttributeView(PosixFileAttributeView.class); - if (supportsPosixPermissions) { - PosixFileAttributes attributes = Files.readAttributes(path, PosixFileAttributes.class); - permissions.put(path, attributes.permissions()); - owners.put(path, 
attributes.owner().getName()); - groups.put(path, attributes.group().getName()); - } - } catch (IOException e) { - // silently swallow if not supported, no need to log things - } - } - } - - CliTool.ExitStatus status = doExecute(settings, env); - - // check if permissions differ - for (Map.Entry> entry : permissions.entrySet()) { - if (!Files.exists(entry.getKey())) { - continue; - } - - Set permissionsBeforeWrite = entry.getValue(); - Set permissionsAfterWrite = Files.getPosixFilePermissions(entry.getKey()); - if (!permissionsBeforeWrite.equals(permissionsAfterWrite)) { - terminal.println(Terminal.Verbosity.SILENT, "WARNING: The file permissions of [" + entry.getKey() + "] have changed " - + "from [" + PosixFilePermissions.toString(permissionsBeforeWrite) + "] " - + "to [" + PosixFilePermissions.toString(permissionsAfterWrite) + "]"); - terminal.println(Terminal.Verbosity.SILENT, "Please ensure that the user account running Elasticsearch has read access to this file!"); - } - } - - // check if owner differs - for (Map.Entry entry : owners.entrySet()) { - if (!Files.exists(entry.getKey())) { - continue; - } - - String ownerBeforeWrite = entry.getValue(); - String ownerAfterWrite = Files.getOwner(entry.getKey()).getName(); - if (!ownerAfterWrite.equals(ownerBeforeWrite)) { - terminal.println(Terminal.Verbosity.SILENT, "WARNING: Owner of file [" + entry.getKey() + "] used to be [" + ownerBeforeWrite + "], but now is [" + ownerAfterWrite + "]"); - } - } - - // check if group differs - for (Map.Entry entry : groups.entrySet()) { - if (!Files.exists(entry.getKey())) { - continue; - } - - String groupBeforeWrite = entry.getValue(); - String groupAfterWrite = Files.readAttributes(entry.getKey(), PosixFileAttributes.class).group().getName(); - if (!groupAfterWrite.equals(groupBeforeWrite)) { - terminal.println(Terminal.Verbosity.SILENT, "WARNING: Group of file [" + entry.getKey() + "] used to be [" + groupBeforeWrite + "], but now is [" + groupAfterWrite + "]"); - } - } - - return status; - } -} diff --git a/core/src/main/java/org/elasticsearch/common/cli/CliTool.java b/core/src/main/java/org/elasticsearch/common/cli/CliTool.java deleted file mode 100644 index 2ea01f45068..00000000000 --- a/core/src/main/java/org/elasticsearch/common/cli/CliTool.java +++ /dev/null @@ -1,250 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.common.cli; - -import org.apache.commons.cli.AlreadySelectedException; -import org.apache.commons.cli.CommandLine; -import org.apache.commons.cli.CommandLineParser; -import org.apache.commons.cli.DefaultParser; -import org.apache.commons.cli.MissingArgumentException; -import org.apache.commons.cli.MissingOptionException; -import org.apache.commons.cli.UnrecognizedOptionException; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; -import org.elasticsearch.node.internal.InternalSettingsPreparer; - -import java.util.Locale; - -import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; - -/** - * A base class for command-line interface tool. - * - * Two modes are supported: - * - * - Single command mode. The tool exposes a single command that can potentially accept arguments (eg. CLI options). - * - Multi command mode. The tool support multiple commands, each for different tasks, each potentially accepts arguments. - * - * In a multi-command mode. The first argument must be the command name. For example, the plugin manager - * can be seen as a multi-command tool with two possible commands: install and uninstall - * - * The tool is configured using a {@link CliToolConfig} which encapsulates the tool's commands and their - * potential options. The tool also comes with out of the box simple help support (the -h/--help option is - * automatically handled) where the help text is configured in a dedicated *.help files located in the same package - * as the tool. - */ -public abstract class CliTool { - - // based on sysexits.h - public enum ExitStatus { - OK(0), - OK_AND_EXIT(0), - USAGE(64), /* command line usage error */ - DATA_ERROR(65), /* data format error */ - NO_INPUT(66), /* cannot open input */ - NO_USER(67), /* addressee unknown */ - NO_HOST(68), /* host name unknown */ - UNAVAILABLE(69), /* service unavailable */ - CODE_ERROR(70), /* internal software error */ - CANT_CREATE(73), /* can't create (user) output file */ - IO_ERROR(74), /* input/output error */ - TEMP_FAILURE(75), /* temp failure; user is invited to retry */ - PROTOCOL(76), /* remote error in protocol */ - NOPERM(77), /* permission denied */ - CONFIG(78); /* configuration error */ - - final int status; - - ExitStatus(int status) { - this.status = status; - } - - public int status() { - return status; - } - } - - protected final Terminal terminal; - protected final Environment env; - protected final Settings settings; - - private final CliToolConfig config; - - protected CliTool(CliToolConfig config) { - this(config, Terminal.DEFAULT); - } - - protected CliTool(CliToolConfig config, Terminal terminal) { - if (config.cmds().size() == 0) { - throw new IllegalArgumentException("At least one command must be configured"); - } - this.config = config; - this.terminal = terminal; - env = InternalSettingsPreparer.prepareEnvironment(EMPTY_SETTINGS, terminal); - settings = env.settings(); - } - - public final ExitStatus execute(String... args) throws Exception { - - // first lets see if the user requests tool help. We're doing it only if - // this is a multi-command tool. 
If it's a single command tool, the -h/--help - // option will be taken care of on the command level - if (!config.isSingle() && args.length > 0 && (args[0].equals("-h") || args[0].equals("--help"))) { - config.printUsage(terminal); - return ExitStatus.OK_AND_EXIT; - } - - CliToolConfig.Cmd cmd; - if (config.isSingle()) { - cmd = config.single(); - } else { - - if (args.length == 0) { - terminal.println(Terminal.Verbosity.SILENT, "ERROR: command not specified"); - config.printUsage(terminal); - return ExitStatus.USAGE; - } - - String cmdName = args[0]; - cmd = config.cmd(cmdName); - if (cmd == null) { - terminal.println(Terminal.Verbosity.SILENT, "ERROR: unknown command [" + cmdName + "]. Use [-h] option to list available commands"); - return ExitStatus.USAGE; - } - - // we now remove the command name from the args - if (args.length == 1) { - args = new String[0]; - } else { - String[] cmdArgs = new String[args.length - 1]; - System.arraycopy(args, 1, cmdArgs, 0, cmdArgs.length); - args = cmdArgs; - } - } - - try { - return parse(cmd, args).execute(settings, env); - } catch (UserError error) { - terminal.println(Terminal.Verbosity.SILENT, "ERROR: " + error.getMessage()); - return error.exitStatus; - } - } - - public Command parse(String cmdName, String[] args) throws Exception { - CliToolConfig.Cmd cmd = config.cmd(cmdName); - return parse(cmd, args); - } - - public Command parse(CliToolConfig.Cmd cmd, String[] args) throws Exception { - CommandLineParser parser = new DefaultParser(); - CommandLine cli = parser.parse(CliToolConfig.OptionsSource.HELP.options(), args, true); - if (cli.hasOption("h")) { - return helpCmd(cmd); - } - try { - cli = parser.parse(cmd.options(), args, cmd.isStopAtNonOption()); - } catch (AlreadySelectedException|MissingArgumentException|MissingOptionException|UnrecognizedOptionException e) { - // intentionally drop the stack trace here as these are really user errors, - // the stack trace into cli parsing lib is not important - throw new UserError(ExitStatus.USAGE, e.toString()); - } - - if (cli.hasOption("v")) { - terminal.setVerbosity(Terminal.Verbosity.VERBOSE); - } else if (cli.hasOption("s")) { - terminal.setVerbosity(Terminal.Verbosity.SILENT); - } else { - terminal.setVerbosity(Terminal.Verbosity.NORMAL); - } - return parse(cmd.name(), cli); - } - - protected Command.Help helpCmd(CliToolConfig.Cmd cmd) { - return new Command.Help(cmd, terminal); - } - - protected static Command.Exit exitCmd(ExitStatus status) { - return new Command.Exit(null, status, null); - } - - protected static Command.Exit exitCmd(ExitStatus status, Terminal terminal, String msg, Object... 
args) { - return new Command.Exit(String.format(Locale.ROOT, msg, args), status, terminal); - } - - protected abstract Command parse(String cmdName, CommandLine cli) throws Exception; - - public static abstract class Command { - - protected final Terminal terminal; - - protected Command(Terminal terminal) { - this.terminal = terminal; - } - - public abstract ExitStatus execute(Settings settings, Environment env) throws Exception; - - public static class Help extends Command { - - private final CliToolConfig.Cmd cmd; - - private Help(CliToolConfig.Cmd cmd, Terminal terminal) { - super(terminal); - this.cmd = cmd; - } - - @Override - public ExitStatus execute(Settings settings, Environment env) throws Exception { - cmd.printUsage(terminal); - return ExitStatus.OK_AND_EXIT; - } - } - - public static class Exit extends Command { - private final String msg; - private final ExitStatus status; - - private Exit(String msg, ExitStatus status, Terminal terminal) { - super(terminal); - this.msg = msg; - this.status = status; - } - - @Override - public ExitStatus execute(Settings settings, Environment env) throws Exception { - if (msg != null) { - if (status != ExitStatus.OK) { - terminal.println(Terminal.Verbosity.SILENT, "ERROR: " + msg); - } else { - terminal.println(msg); - } - } - return status; - } - - public ExitStatus status() { - return status; - } - } - } - - - -} - diff --git a/core/src/main/java/org/elasticsearch/common/cli/CliToolConfig.java b/core/src/main/java/org/elasticsearch/common/cli/CliToolConfig.java deleted file mode 100644 index d0ba897b33d..00000000000 --- a/core/src/main/java/org/elasticsearch/common/cli/CliToolConfig.java +++ /dev/null @@ -1,302 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.common.cli; - -import org.apache.commons.cli.Option; -import org.apache.commons.cli.OptionGroup; -import org.apache.commons.cli.Options; - -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - -/** - * - */ -public class CliToolConfig { - - public static Builder config(String name, Class toolType) { - return new Builder(name, toolType); - } - - private final Class toolType; - private final String name; - private final Map cmds; - - private static final HelpPrinter helpPrinter = new HelpPrinter(); - - private CliToolConfig(String name, Class toolType, Cmd[] cmds) { - this.name = name; - this.toolType = toolType; - final Map cmdsMapping = new HashMap<>(); - for (int i = 0; i < cmds.length; i++) { - cmdsMapping.put(cmds[i].name, cmds[i]); - } - this.cmds = Collections.unmodifiableMap(cmdsMapping); - } - - public boolean isSingle() { - return cmds.size() == 1; - } - - public Cmd single() { - assert isSingle() : "Requesting single command on a multi-command tool"; - return cmds.values().iterator().next(); - } - - public Class toolType() { - return toolType; - } - - public String name() { - return name; - } - - public Collection cmds() { - return cmds.values(); - } - - public Cmd cmd(String name) { - return cmds.get(name); - } - - public void printUsage(Terminal terminal) { - helpPrinter.print(this, terminal); - } - - public static class Builder { - - public static Cmd.Builder cmd(String name, Class cmdType) { - return new Cmd.Builder(name, cmdType); - } - - public static OptionBuilder option(String shortName, String longName) { - return new OptionBuilder(shortName, longName); - } - - public static Option.Builder optionBuilder(String shortName, String longName) { - return Option.builder(shortName).argName(longName).longOpt(longName); - } - - public static OptionGroupBuilder optionGroup(boolean required) { - return new OptionGroupBuilder(required); - } - - private final Class toolType; - private final String name; - private Cmd[] cmds; - - private Builder(String name, Class toolType) { - this.name = name; - this.toolType = toolType; - } - - public Builder cmds(Cmd.Builder... cmds) { - this.cmds = new Cmd[cmds.length]; - for (int i = 0; i < cmds.length; i++) { - this.cmds[i] = cmds[i].build(); - this.cmds[i].toolName = name; - } - return this; - } - - public Builder cmds(Cmd... 
cmds) { - for (int i = 0; i < cmds.length; i++) { - cmds[i].toolName = name; - } - this.cmds = cmds; - return this; - } - - public CliToolConfig build() { - return new CliToolConfig(name, toolType, cmds); - } - } - - public static class Cmd { - - private String toolName; - private final String name; - private final Class cmdType; - private final Options options; - private final boolean stopAtNonOption; - - private Cmd(String name, Class cmdType, Options options, boolean stopAtNonOption) { - this.name = name; - this.cmdType = cmdType; - this.options = options; - this.stopAtNonOption = stopAtNonOption; - OptionsSource.VERBOSITY.populate(options); - } - - public Class cmdType() { - return cmdType; - } - - public String name() { - return name; - } - - public Options options() { - return options; - } - - public boolean isStopAtNonOption() { - return stopAtNonOption; - } - - public void printUsage(Terminal terminal) { - helpPrinter.print(toolName, this, terminal); - } - - public static class Builder { - - private final String name; - private final Class cmdType; - private Options options = new Options(); - private boolean stopAtNonOption = false; - - private Builder(String name, Class cmdType) { - this.name = name; - this.cmdType = cmdType; - } - - public Builder options(OptionBuilder... optionBuilder) { - for (int i = 0; i < optionBuilder.length; i++) { - options.addOption(optionBuilder[i].build()); - } - return this; - } - - public Builder options(Option.Builder... optionBuilders) { - for (int i = 0; i < optionBuilders.length; i++) { - options.addOption(optionBuilders[i].build()); - } - return this; - } - - public Builder optionGroups(OptionGroupBuilder... optionGroupBuilders) { - for (OptionGroupBuilder builder : optionGroupBuilders) { - options.addOptionGroup(builder.build()); - } - return this; - } - - /** - * @param stopAtNonOption if true an unrecognized argument stops - * the parsing and the remaining arguments are added to the - * args list. If false an unrecognized - * argument triggers a ParseException. - */ - public Builder stopAtNonOption(boolean stopAtNonOption) { - this.stopAtNonOption = stopAtNonOption; - return this; - } - - public Cmd build() { - return new Cmd(name, cmdType, options, stopAtNonOption); - } - } - } - - public static class OptionBuilder { - - private final Option option; - - private OptionBuilder(String shortName, String longName) { - option = new Option(shortName, ""); - option.setLongOpt(longName); - option.setArgName(longName); - } - - public OptionBuilder required(boolean required) { - option.setRequired(required); - return this; - } - - public OptionBuilder hasArg(boolean optional) { - option.setOptionalArg(optional); - option.setArgs(1); - return this; - } - - public Option build() { - return option; - } - } - - public static class OptionGroupBuilder { - - private OptionGroup group; - - private OptionGroupBuilder(boolean required) { - group = new OptionGroup(); - group.setRequired(required); - } - - public OptionGroupBuilder options(OptionBuilder... 
optionBuilders) { - for (OptionBuilder builder : optionBuilders) { - group.addOption(builder.build()); - } - return this; - } - - public OptionGroup build() { - return group; - } - - } - - static abstract class OptionsSource { - - static final OptionsSource HELP = new OptionsSource() { - - @Override - void populate(Options options) { - options.addOption(new OptionBuilder("h", "help").required(false).build()); - } - }; - - static final OptionsSource VERBOSITY = new OptionsSource() { - @Override - void populate(Options options) { - OptionGroup verbosityGroup = new OptionGroup(); - verbosityGroup.setRequired(false); - verbosityGroup.addOption(new OptionBuilder("s", "silent").required(false).build()); - verbosityGroup.addOption(new OptionBuilder("v", "verbose").required(false).build()); - options.addOptionGroup(verbosityGroup); - } - }; - - private Options options; - - Options options() { - if (options == null) { - options = new Options(); - populate(options); - } - return options; - } - - abstract void populate(Options options); - - } -} diff --git a/core/src/main/java/org/elasticsearch/common/cli/HelpPrinter.java b/core/src/main/java/org/elasticsearch/common/cli/HelpPrinter.java deleted file mode 100644 index ada6cc33a19..00000000000 --- a/core/src/main/java/org/elasticsearch/common/cli/HelpPrinter.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.common.cli; - -import org.elasticsearch.common.io.Streams; -import org.elasticsearch.common.util.Callback; - -import java.io.IOException; -import java.io.InputStream; - -/** - * - */ -public class HelpPrinter { - - private static final String HELP_FILE_EXT = ".help"; - - public void print(CliToolConfig config, Terminal terminal) { - print(config.toolType(), config.name(), terminal); - } - - public void print(String toolName, CliToolConfig.Cmd cmd, Terminal terminal) { - print(cmd.cmdType(), toolName + "-" + cmd.name(), terminal); - } - - private static void print(Class clazz, String name, final Terminal terminal) { - terminal.println(Terminal.Verbosity.SILENT, ""); - try (InputStream input = clazz.getResourceAsStream(name + HELP_FILE_EXT)) { - Streams.readAllLines(input, new Callback() { - @Override - public void handle(String line) { - terminal.println(Terminal.Verbosity.SILENT, line); - } - }); - } catch (IOException ioe) { - throw new RuntimeException(ioe); - } - terminal.println(Terminal.Verbosity.SILENT, ""); - } -} diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java index 4a9c8441072..52314c98ef1 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java @@ -314,7 +314,7 @@ public class PolygonBuilder extends ShapeBuilder { double shiftOffset = any.coordinate.x > DATELINE ? DATELINE : (any.coordinate.x < -DATELINE ? -DATELINE : 0); if (debugEnabled()) { - LOGGER.debug("shift: {[]}", shiftOffset); + LOGGER.debug("shift: [{}]", shiftOffset); } // run along the border of the component, collect the @@ -392,9 +392,9 @@ public class PolygonBuilder extends ShapeBuilder { if(debugEnabled()) { for (int i = 0; i < result.length; i++) { - LOGGER.debug("Component {[]}:", i); + LOGGER.debug("Component [{}]:", i); for (int j = 0; j < result[i].length; j++) { - LOGGER.debug("\t" + Arrays.toString(result[i][j])); + LOGGER.debug("\t{}", Arrays.toString(result[i][j])); } } } @@ -444,7 +444,7 @@ public class PolygonBuilder extends ShapeBuilder { // is an arbitrary point of the hole. The polygon edge next to this point // is part of the polygon the hole belongs to. 
if (debugEnabled()) { - LOGGER.debug("Holes: " + Arrays.toString(holes)); + LOGGER.debug("Holes: {}", Arrays.toString(holes)); } for (int i = 0; i < numHoles; i++) { final Edge current = new Edge(holes[i].coordinate, holes[i].next); @@ -464,9 +464,9 @@ public class PolygonBuilder extends ShapeBuilder { final int component = -edges[index].component - numHoles - 1; if(debugEnabled()) { - LOGGER.debug("\tposition ("+index+") of edge "+current+": " + edges[index]); - LOGGER.debug("\tComponent: " + component); - LOGGER.debug("\tHole intersections ("+current.coordinate.x+"): " + Arrays.toString(edges)); + LOGGER.debug("\tposition ({}) of edge {}: {}", index, current, edges[index]); + LOGGER.debug("\tComponent: {}", component); + LOGGER.debug("\tHole intersections ({}): {}", current.coordinate.x, Arrays.toString(edges)); } components.get(component).add(points[i]); diff --git a/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java b/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java index 870b5f61466..b792a85d34c 100644 --- a/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java +++ b/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java @@ -19,6 +19,8 @@ package org.elasticsearch.common.logging; +import org.elasticsearch.common.SuppressLoggerChecks; + /** * A logger that logs deprecation notices. */ @@ -45,6 +47,7 @@ public class DeprecationLogger { /** * Logs a deprecated message. */ + @SuppressLoggerChecks(reason = "safely delegates to logger") public void deprecated(String msg, Object... params) { logger.debug(msg, params); } diff --git a/core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java b/core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java index 94ade9334d7..c0951c47df1 100644 --- a/core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java +++ b/core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java @@ -21,6 +21,7 @@ package org.elasticsearch.common.logging; import org.apache.log4j.Logger; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import java.util.Locale; @@ -30,9 +31,10 @@ import java.util.Locale; public abstract class ESLoggerFactory { public static final Setting LOG_DEFAULT_LEVEL_SETTING = - new Setting<>("logger.level", LogLevel.INFO.name(), LogLevel::parse, false, Setting.Scope.CLUSTER); + new Setting<>("logger.level", LogLevel.INFO.name(), LogLevel::parse, Property.NodeScope); public static final Setting LOG_LEVEL_SETTING = - Setting.prefixKeySetting("logger.", LogLevel.INFO.name(), LogLevel::parse, true, Setting.Scope.CLUSTER); + Setting.prefixKeySetting("logger.", LogLevel.INFO.name(), LogLevel::parse, + Property.Dynamic, Property.NodeScope); public static ESLogger getLogger(String prefix, String name) { prefix = prefix == null ? 
null : prefix.intern(); diff --git a/core/src/main/java/org/elasticsearch/common/logging/TerminalAppender.java b/core/src/main/java/org/elasticsearch/common/logging/TerminalAppender.java index 7031a62a999..e967ad9d79e 100644 --- a/core/src/main/java/org/elasticsearch/common/logging/TerminalAppender.java +++ b/core/src/main/java/org/elasticsearch/common/logging/TerminalAppender.java @@ -22,7 +22,7 @@ package org.elasticsearch.common.logging; import org.apache.log4j.AppenderSkeleton; import org.apache.log4j.spi.LoggingEvent; -import org.elasticsearch.common.cli.Terminal; +import org.elasticsearch.cli.Terminal; /** * TerminalAppender logs event to Terminal.DEFAULT. It is used for example by the PluginCli. diff --git a/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java index 54e5738e78c..8508a8a2e40 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -111,7 +111,7 @@ public class Lucene { try { return Version.parse(version); } catch (ParseException e) { - logger.warn("no version match {}, default to {}", version, defaultVersion, e); + logger.warn("no version match {}, default to {}", e, version, defaultVersion); return defaultVersion; } } diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQuery.java b/core/src/main/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQuery.java index 754d76fed27..52de9a7e5db 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQuery.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQuery.java @@ -134,7 +134,7 @@ public class MultiPhrasePrefixQuery extends Query { if (termArrays.isEmpty()) { return new MatchNoDocsQuery(); } - MultiPhraseQuery query = new MultiPhraseQuery(); + MultiPhraseQuery.Builder query = new MultiPhraseQuery.Builder(); query.setSlop(slop); int sizeMinus1 = termArrays.size() - 1; for (int i = 0; i < sizeMinus1; i++) { @@ -153,7 +153,7 @@ public class MultiPhrasePrefixQuery extends Query { return Queries.newMatchNoDocsQuery(); } query.add(terms.toArray(Term.class), position); - return query.rewrite(reader); + return query.build(); } private void getPrefixTerms(ObjectHashSet terms, final Term prefix, final IndexReader reader) throws IOException { diff --git a/core/src/main/java/org/elasticsearch/common/network/IfConfig.java b/core/src/main/java/org/elasticsearch/common/network/IfConfig.java index ea33274fad1..282f348c81b 100644 --- a/core/src/main/java/org/elasticsearch/common/network/IfConfig.java +++ b/core/src/main/java/org/elasticsearch/common/network/IfConfig.java @@ -31,14 +31,14 @@ import java.net.SocketException; import java.util.List; import java.util.Locale; -/** +/** * Simple class to log {@code ifconfig}-style output at DEBUG logging. 
*/ final class IfConfig { - private static final ESLogger logger = Loggers.getLogger(IfConfig.class); + private static final ESLogger logger = Loggers.getLogger(IfConfig.class); private static final String INDENT = " "; - + /** log interface configuration at debug level, if its enabled */ static void logIfNecessary() { if (logger.isDebugEnabled()) { @@ -49,7 +49,7 @@ final class IfConfig { } } } - + /** perform actual logging: might throw exception if things go wrong */ private static void doLogging() throws IOException { StringBuilder msg = new StringBuilder(); @@ -59,14 +59,14 @@ final class IfConfig { // ordinary name msg.append(nic.getName()); msg.append(System.lineSeparator()); - + // display name (e.g. on windows) if (!nic.getName().equals(nic.getDisplayName())) { msg.append(INDENT); msg.append(nic.getDisplayName()); msg.append(System.lineSeparator()); } - + // addresses: v4 first, then v6 List addresses = nic.getInterfaceAddresses(); for (InterfaceAddress address : addresses) { @@ -76,7 +76,7 @@ final class IfConfig { msg.append(System.lineSeparator()); } } - + for (InterfaceAddress address : addresses) { if (address.getAddress() instanceof Inet6Address) { msg.append(INDENT); @@ -84,7 +84,7 @@ final class IfConfig { msg.append(System.lineSeparator()); } } - + // hardware address byte hardware[] = nic.getHardwareAddress(); if (hardware != null) { @@ -98,19 +98,19 @@ final class IfConfig { } msg.append(System.lineSeparator()); } - + // attributes msg.append(INDENT); msg.append(formatFlags(nic)); msg.append(System.lineSeparator()); } - logger.debug("configuration:" + System.lineSeparator() + "{}", msg.toString()); + logger.debug("configuration:{}{}", System.lineSeparator(), msg); } - + /** format internet address: java's default doesn't include everything useful */ private static String formatAddress(InterfaceAddress interfaceAddress) throws IOException { StringBuilder sb = new StringBuilder(); - + InetAddress address = interfaceAddress.getAddress(); if (address instanceof Inet6Address) { sb.append("inet6 "); @@ -122,10 +122,10 @@ final class IfConfig { sb.append(NetworkAddress.formatAddress(address)); int netmask = 0xFFFFFFFF << (32 - interfaceAddress.getNetworkPrefixLength()); sb.append(" netmask:" + NetworkAddress.formatAddress(InetAddress.getByAddress(new byte[] { - (byte)(netmask >>> 24), - (byte)(netmask >>> 16 & 0xFF), - (byte)(netmask >>> 8 & 0xFF), - (byte)(netmask & 0xFF) + (byte)(netmask >>> 24), + (byte)(netmask >>> 16 & 0xFF), + (byte)(netmask >>> 8 & 0xFF), + (byte)(netmask & 0xFF) }))); InetAddress broadcast = interfaceAddress.getBroadcast(); if (broadcast != null) { @@ -141,7 +141,7 @@ final class IfConfig { } return sb.toString(); } - + /** format network interface flags */ private static String formatFlags(NetworkInterface nic) throws SocketException { StringBuilder flags = new StringBuilder(); diff --git a/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java b/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java index b0598469d3a..1a54ad2753a 100644 --- a/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java +++ b/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java @@ -28,8 +28,8 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import 
org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.Setting.Scope; import org.elasticsearch.common.util.ExtensionPoint; import org.elasticsearch.http.HttpServer; import org.elasticsearch.http.HttpServerTransport; @@ -155,10 +155,11 @@ public class NetworkModule extends AbstractModule { public static final String LOCAL_TRANSPORT = "local"; public static final String NETTY_TRANSPORT = "netty"; - public static final Setting HTTP_TYPE_SETTING = Setting.simpleString("http.type", false, Scope.CLUSTER); - public static final Setting HTTP_ENABLED = Setting.boolSetting("http.enabled", true, false, Scope.CLUSTER); - public static final Setting TRANSPORT_SERVICE_TYPE_SETTING = Setting.simpleString("transport.service.type", false, Scope.CLUSTER); - public static final Setting TRANSPORT_TYPE_SETTING = Setting.simpleString("transport.type", false, Scope.CLUSTER); + public static final Setting HTTP_TYPE_SETTING = Setting.simpleString("http.type", Property.NodeScope); + public static final Setting HTTP_ENABLED = Setting.boolSetting("http.enabled", true, Property.NodeScope); + public static final Setting TRANSPORT_SERVICE_TYPE_SETTING = + Setting.simpleString("transport.service.type", Property.NodeScope); + public static final Setting TRANSPORT_TYPE_SETTING = Setting.simpleString("transport.type", Property.NodeScope); diff --git a/core/src/main/java/org/elasticsearch/common/network/NetworkService.java b/core/src/main/java/org/elasticsearch/common/network/NetworkService.java index 5e8dbc4dcad..ff1f3912cc5 100644 --- a/core/src/main/java/org/elasticsearch/common/network/NetworkService.java +++ b/core/src/main/java/org/elasticsearch/common/network/NetworkService.java @@ -22,6 +22,7 @@ package org.elasticsearch.common.network; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; @@ -34,6 +35,7 @@ import java.util.HashSet; import java.util.List; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.TimeUnit; +import java.util.function.Function; /** * @@ -43,24 +45,33 @@ public class NetworkService extends AbstractComponent { /** By default, we bind to loopback interfaces */ public static final String DEFAULT_NETWORK_HOST = "_local_"; - public static final Setting> GLOBAL_NETWORK_HOST_SETTING = Setting.listSetting("network.host", Arrays.asList(DEFAULT_NETWORK_HOST), - s -> s, false, Setting.Scope.CLUSTER); - public static final Setting> GLOBAL_NETWORK_BINDHOST_SETTING = Setting.listSetting("network.bind_host", GLOBAL_NETWORK_HOST_SETTING, - s -> s, false, Setting.Scope.CLUSTER); - public static final Setting> GLOBAL_NETWORK_PUBLISHHOST_SETTING = Setting.listSetting("network.publish_host", GLOBAL_NETWORK_HOST_SETTING, - s -> s, false, Setting.Scope.CLUSTER); - public static final Setting NETWORK_SERVER = Setting.boolSetting("network.server", true, false, Setting.Scope.CLUSTER); + public static final Setting> GLOBAL_NETWORK_HOST_SETTING = + Setting.listSetting("network.host", Arrays.asList(DEFAULT_NETWORK_HOST), Function.identity(), Property.NodeScope); + public static final Setting> GLOBAL_NETWORK_BINDHOST_SETTING = + Setting.listSetting("network.bind_host", GLOBAL_NETWORK_HOST_SETTING, Function.identity(), Property.NodeScope); + public static final 
Setting> GLOBAL_NETWORK_PUBLISHHOST_SETTING = + Setting.listSetting("network.publish_host", GLOBAL_NETWORK_HOST_SETTING, Function.identity(), Property.NodeScope); + public static final Setting NETWORK_SERVER = Setting.boolSetting("network.server", true, Property.NodeScope); public static final class TcpSettings { - public static final Setting TCP_NO_DELAY = Setting.boolSetting("network.tcp.no_delay", true, false, Setting.Scope.CLUSTER); - public static final Setting TCP_KEEP_ALIVE = Setting.boolSetting("network.tcp.keep_alive", true, false, Setting.Scope.CLUSTER); - public static final Setting TCP_REUSE_ADDRESS = Setting.boolSetting("network.tcp.reuse_address", NetworkUtils.defaultReuseAddress(), false, Setting.Scope.CLUSTER); - public static final Setting TCP_SEND_BUFFER_SIZE = Setting.byteSizeSetting("network.tcp.send_buffer_size", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER); - public static final Setting TCP_RECEIVE_BUFFER_SIZE = Setting.byteSizeSetting("network.tcp.receive_buffer_size", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER); - public static final Setting TCP_BLOCKING = Setting.boolSetting("network.tcp.blocking", false, false, Setting.Scope.CLUSTER); - public static final Setting TCP_BLOCKING_SERVER = Setting.boolSetting("network.tcp.blocking_server", TCP_BLOCKING, false, Setting.Scope.CLUSTER); - public static final Setting TCP_BLOCKING_CLIENT = Setting.boolSetting("network.tcp.blocking_client", TCP_BLOCKING, false, Setting.Scope.CLUSTER); - public static final Setting TCP_CONNECT_TIMEOUT = Setting.timeSetting("network.tcp.connect_timeout", new TimeValue(30, TimeUnit.SECONDS), false, Setting.Scope.CLUSTER); + public static final Setting TCP_NO_DELAY = + Setting.boolSetting("network.tcp.no_delay", true, Property.NodeScope); + public static final Setting TCP_KEEP_ALIVE = + Setting.boolSetting("network.tcp.keep_alive", true, Property.NodeScope); + public static final Setting TCP_REUSE_ADDRESS = + Setting.boolSetting("network.tcp.reuse_address", NetworkUtils.defaultReuseAddress(), Property.NodeScope); + public static final Setting TCP_SEND_BUFFER_SIZE = + Setting.byteSizeSetting("network.tcp.send_buffer_size", new ByteSizeValue(-1), Property.NodeScope); + public static final Setting TCP_RECEIVE_BUFFER_SIZE = + Setting.byteSizeSetting("network.tcp.receive_buffer_size", new ByteSizeValue(-1), Property.NodeScope); + public static final Setting TCP_BLOCKING = + Setting.boolSetting("network.tcp.blocking", false, Property.NodeScope); + public static final Setting TCP_BLOCKING_SERVER = + Setting.boolSetting("network.tcp.blocking_server", TCP_BLOCKING, Property.NodeScope); + public static final Setting TCP_BLOCKING_CLIENT = + Setting.boolSetting("network.tcp.blocking_client", TCP_BLOCKING, Property.NodeScope); + public static final Setting TCP_CONNECT_TIMEOUT = + Setting.timeSetting("network.tcp.connect_timeout", new TimeValue(30, TimeUnit.SECONDS), Property.NodeScope); } /** diff --git a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java index 453fc3f9a36..baed9c0849f 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java @@ -44,19 +44,19 @@ public abstract class AbstractScopedSettings extends AbstractComponent { private final List> settingUpdaters = new CopyOnWriteArrayList<>(); private final Map> complexMatchers; private final Map> 
keySettings; - private final Setting.Scope scope; + private final Setting.Property scope; private static final Pattern KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])*[-\\w]+$"); private static final Pattern GROUP_KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])+$"); - protected AbstractScopedSettings(Settings settings, Set> settingsSet, Setting.Scope scope) { + protected AbstractScopedSettings(Settings settings, Set> settingsSet, Setting.Property scope) { super(settings); this.lastSettingsApplied = Settings.EMPTY; this.scope = scope; Map> complexMatchers = new HashMap<>(); Map> keySettings = new HashMap<>(); for (Setting setting : settingsSet) { - if (setting.getScope() != scope) { - throw new IllegalArgumentException("Setting must be a " + scope + " setting but was: " + setting.getScope()); + if (setting.getProperties().contains(scope) == false) { + throw new IllegalArgumentException("Setting must be a " + scope + " setting but has: " + setting.getProperties()); } if (isValidKey(setting.getKey()) == false && (setting.isGroupSetting() && isValidGroupKey(setting.getKey())) == false) { throw new IllegalArgumentException("illegal settings key: [" + setting.getKey() + "]"); @@ -96,7 +96,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent { return GROUP_KEY_PATTERN.matcher(key).matches(); } - public Setting.Scope getScope() { + public Setting.Property getScope() { return this.scope; } @@ -342,8 +342,9 @@ public abstract class AbstractScopedSettings extends AbstractComponent { * Returns the value for the given setting. */ public T get(Setting setting) { - if (setting.getScope() != scope) { - throw new IllegalArgumentException("settings scope doesn't match the setting scope [" + this.scope + "] != [" + setting.getScope() + "]"); + if (setting.getProperties().contains(scope) == false) { + throw new IllegalArgumentException("settings scope doesn't match the setting scope [" + this.scope + "] not in [" + + setting.getProperties() + "]"); } if (get(setting.getKey()) == null) { throw new IllegalArgumentException("setting " + setting.getKey() + " has not been registered"); diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 3215f3db05a..af3504cd8aa 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -47,6 +47,7 @@ import org.elasticsearch.cluster.service.InternalClusterService; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.discovery.DiscoveryModule; @@ -103,7 +104,7 @@ import java.util.function.Predicate; */ public final class ClusterSettings extends AbstractScopedSettings { public ClusterSettings(Settings nodeSettings, Set> settingsSet) { - super(nodeSettings, settingsSet, Setting.Scope.CLUSTER); + super(nodeSettings, settingsSet, Property.NodeScope); addSettingsUpdater(new LoggingSettingUpdater(nodeSettings)); } diff --git a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index b8b75147740..da6c34bdf4a 100644 --- 
a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -22,6 +22,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.gateway.PrimaryShardAllocator; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; @@ -51,7 +52,7 @@ import java.util.function.Predicate; /** * Encapsulates all valid index level settings. - * @see org.elasticsearch.common.settings.Setting.Scope#INDEX + * @see Property#IndexScope */ public final class IndexScopedSettings extends AbstractScopedSettings { @@ -136,22 +137,22 @@ public final class IndexScopedSettings extends AbstractScopedSettings { EngineConfig.INDEX_CODEC_SETTING, IndexWarmer.INDEX_NORMS_LOADING_SETTING, // validate that built-in similarities don't get redefined - Setting.groupSetting("index.similarity.", false, Setting.Scope.INDEX, (s) -> { + Setting.groupSetting("index.similarity.", (s) -> { Map groups = s.getAsGroups(); for (String key : SimilarityService.BUILT_IN.keySet()) { if (groups.containsKey(key)) { throw new IllegalArgumentException("illegal value for [index.similarity."+ key + "] cannot redefine built-in similarity"); } } - }), // this allows similarity settings to be passed - Setting.groupSetting("index.analysis.", false, Setting.Scope.INDEX) // this allows analysis settings to be passed + }, Property.IndexScope), // this allows similarity settings to be passed + Setting.groupSetting("index.analysis.", Property.IndexScope) // this allows analysis settings to be passed ))); public static final IndexScopedSettings DEFAULT_SCOPED_SETTINGS = new IndexScopedSettings(Settings.EMPTY, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS); public IndexScopedSettings(Settings settings, Set> settingsSet) { - super(settings, settingsSet, Setting.Scope.INDEX); + super(settings, settingsSet, Property.IndexScope); } private IndexScopedSettings(Settings settings, IndexScopedSettings other, IndexMetaData metaData) { diff --git a/core/src/main/java/org/elasticsearch/common/settings/Setting.java b/core/src/main/java/org/elasticsearch/common/settings/Setting.java index c31b905abbf..f0e1b2e64ea 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -25,7 +25,9 @@ import org.elasticsearch.action.support.ToXContentToBytes; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.MemorySizeValue; @@ -37,6 +39,10 @@ import org.elasticsearch.common.xcontent.XContentType; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.EnumSet; +import java.util.Enumeration; import java.util.List; import java.util.Map; import java.util.Objects; @@ -49,12 +55,12 @@ import 
java.util.stream.Collectors; /** * A setting. Encapsulates typical stuff like default value, parsing, and scope. - * Some (dynamic=true) can by modified at run time using the API. + * Some (Property.Dynamic) can be modified at run time using the API. * All settings inside elasticsearch or in any of the plugins should use this type-safe and generic settings infrastructure * together with {@link AbstractScopedSettings}. This class contains several utility methods that make it straightforward * to add settings for the majority of the cases. For instance a simple boolean setting can be defined like this: * <pre>{@code
- * public static final Setting<Boolean> MY_BOOLEAN = Setting.boolSetting("my.bool.setting", true, false, Scope.CLUSTER);}
+ * public static final Setting<Boolean> MY_BOOLEAN = Setting.boolSetting("my.bool.setting", true, Property.NodeScope);}
 * </pre>
* To retrieve the value of the setting a {@link Settings} object can be passed directly to the {@link Setting#get(Settings)} method. * <pre>
@@ -65,32 +71,81 @@ import java.util.stream.Collectors;
 * public enum Color {
 *     RED, GREEN, BLUE;
 * }
- * public static final Setting<Color> MY_BOOLEAN = new Setting<>("my.color.setting", Color.RED.toString(), Color::valueOf, false, Scope.CLUSTER);
+ * public static final Setting<Color> MY_COLOR =
+ *     new Setting<>("my.color.setting", Color.RED.toString(), Color::valueOf, Property.NodeScope);
 * }
 * </pre>
*/ public class Setting extends ToXContentToBytes { + + public enum Property { + /** + * should be filtered in some APIs (mask passwords/credentials) + */ + Filtered, + + /** + * iff this setting can be dynamically updated + */ + Dynamic, + + /** + * mark this setting as deprecated + */ + Deprecated, + + /** + * Node scope + */ + NodeScope, + + /** + * Index scope + */ + IndexScope + } + + private static final ESLogger logger = Loggers.getLogger(Setting.class); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); + private final Key key; protected final Function<Settings, String> defaultValue; private final Function<String, T> parser; - private final boolean dynamic; - private final Scope scope; + private final EnumSet<Property> properties; + + private static final EnumSet<Property> EMPTY_PROPERTIES = EnumSet.noneOf(Property.class); /** - * Creates a new Setting instance + * Creates a new Setting instance. A setting should declare exactly one scope, either {@link Property#NodeScope} or {@link Property#IndexScope}. * @param key the settings key for this setting. * @param defaultValue a default value function that returns the default values string representation. * @param parser a parser that parses the string rep into a complex datatype. - * @param dynamic true iff this setting can be dynamically updateable - * @param scope the scope of this setting + * @param properties properties for this setting like scope, filtering... */ - public Setting(Key key, Function<Settings, String> defaultValue, Function<String, T> parser, boolean dynamic, Scope scope) { + public Setting(Key key, Function<Settings, String> defaultValue, Function<String, T> parser, Property... properties) { assert parser.apply(defaultValue.apply(Settings.EMPTY)) != null || this.isGroupSetting(): "parser returned null"; this.key = key; this.defaultValue = defaultValue; this.parser = parser; - this.dynamic = dynamic; - this.scope = scope; + if (properties == null) { + throw new IllegalArgumentException("properties can not be null for setting [" + key + "]"); + } + if (properties.length == 0) { + this.properties = EMPTY_PROPERTIES; + } else { + this.properties = EnumSet.copyOf(Arrays.asList(properties)); + } + } + + /** + * Creates a new Setting instance + * @param key the settings key for this setting. + * @param defaultValue a default value. + * @param parser a parser that parses the string rep into a complex datatype. + * @param properties properties for this setting like scope, filtering... + */ + public Setting(String key, String defaultValue, Function<String, T> parser, Property... properties) { + this(key, s -> defaultValue, parser, properties); } /** @@ -98,11 +153,10 @@ * @param key the settings key for this setting. * @param defaultValue a default value function that returns the default values string representation. * @param parser a parser that parses the string rep into a complex datatype. - * @param dynamic true iff this setting can be dynamically updateable - * @param scope the scope of this setting + * @param properties properties for this setting like scope, filtering... */ - public Setting(String key, Function<Settings, String> defaultValue, Function<String, T> parser, boolean dynamic, Scope scope) { - this(new SimpleKey(key), defaultValue, parser, dynamic, scope); + public Setting(String key, Function<Settings, String> defaultValue, Function<String, T> parser, Property... properties) { + this(new SimpleKey(key), defaultValue, parser, properties); } /** @@ -110,11 +164,10 @@ * @param key the settings key for this setting.
* @param fallBackSetting a setting to fall back to if the current setting is not set. * @param parser a parser that parses the string rep into a complex datatype. - * @param dynamic true iff this setting can be dynamically updateable - * @param scope the scope of this setting + * @param properties properties for this setting like scope, filtering... */ - public Setting(String key, Setting fallBackSetting, Function parser, boolean dynamic, Scope scope) { - this(key, fallBackSetting::getRaw, parser, dynamic, scope); + public Setting(String key, Setting fallBackSetting, Function parser, Property... properties) { + this(key, fallBackSetting::getRaw, parser, properties); } /** @@ -136,17 +189,46 @@ public class Setting extends ToXContentToBytes { } /** - * Returns true iff this setting is dynamically updateable, otherwise false + * Returns true if this setting is dynamically updateable, otherwise false */ public final boolean isDynamic() { - return dynamic; + return properties.contains(Property.Dynamic); } /** - * Returns the settings scope + * Returns the setting properties + * @see Property */ - public final Scope getScope() { - return scope; + public EnumSet getProperties() { + return properties; + } + + /** + * Returns true if this setting must be filtered, otherwise false + */ + public boolean isFiltered() { + return properties.contains(Property.Filtered); + } + + /** + * Returns true if this setting has a node scope, otherwise false + */ + public boolean hasNodeScope() { + return properties.contains(Property.NodeScope); + } + + /** + * Returns true if this setting has an index scope, otherwise false + */ + public boolean hasIndexScope() { + return properties.contains(Property.IndexScope); + } + + /** + * Returns true if this setting is deprecated, otherwise false + */ + public boolean isDeprecated() { + return properties.contains(Property.Deprecated); } /** @@ -209,6 +291,12 @@ public class Setting extends ToXContentToBytes { * instead. This is useful if the value can't be parsed due to an invalid value to access the actual value. */ public String getRaw(Settings settings) { + // They're using the setting, so we need to tell them to stop + if (this.isDeprecated() && this.exists(settings)) { + // It would be convenient to show its replacement key, but replacement is often not so simple + deprecationLogger.deprecated("[{}] setting was deprecated in Elasticsearch and it will be removed in a future release! " + + "See the breaking changes lists in the documentation for details", getKey()); + } return settings.get(getKey(), defaultValue.apply(settings)); } @@ -225,8 +313,7 @@ public class Setting extends ToXContentToBytes { public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field("key", key.toString()); - builder.field("type", scope.name()); - builder.field("dynamic", dynamic); + builder.field("properties", properties); builder.field("is_group_setting", isGroupSetting()); builder.field("default", defaultValue.apply(Settings.EMPTY)); builder.endObject(); @@ -248,14 +335,6 @@ public class Setting extends ToXContentToBytes { return this; } - /** - * The settings scope - settings can either be cluster settings or per index settings. - */ - public enum Scope { - CLUSTER, - INDEX; - } - /** * Build a new updater with a noop validator. 
*/ @@ -353,38 +432,34 @@ public class Setting extends ToXContentToBytes { } - public Setting(String key, String defaultValue, Function parser, boolean dynamic, Scope scope) { - this(key, (s) -> defaultValue, parser, dynamic, scope); + public static Setting floatSetting(String key, float defaultValue, Property... properties) { + return new Setting<>(key, (s) -> Float.toString(defaultValue), Float::parseFloat, properties); } - public static Setting floatSetting(String key, float defaultValue, boolean dynamic, Scope scope) { - return new Setting<>(key, (s) -> Float.toString(defaultValue), Float::parseFloat, dynamic, scope); - } - - public static Setting floatSetting(String key, float defaultValue, float minValue, boolean dynamic, Scope scope) { + public static Setting floatSetting(String key, float defaultValue, float minValue, Property... properties) { return new Setting<>(key, (s) -> Float.toString(defaultValue), (s) -> { float value = Float.parseFloat(s); if (value < minValue) { throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue); } return value; - }, dynamic, scope); + }, properties); } - public static Setting intSetting(String key, int defaultValue, int minValue, int maxValue, boolean dynamic, Scope scope) { - return new Setting<>(key, (s) -> Integer.toString(defaultValue), (s) -> parseInt(s, minValue, maxValue, key), dynamic, scope); + public static Setting intSetting(String key, int defaultValue, int minValue, int maxValue, Property... properties) { + return new Setting<>(key, (s) -> Integer.toString(defaultValue), (s) -> parseInt(s, minValue, maxValue, key), properties); } - public static Setting intSetting(String key, int defaultValue, int minValue, boolean dynamic, Scope scope) { - return new Setting<>(key, (s) -> Integer.toString(defaultValue), (s) -> parseInt(s, minValue, key), dynamic, scope); + public static Setting intSetting(String key, int defaultValue, int minValue, Property... properties) { + return new Setting<>(key, (s) -> Integer.toString(defaultValue), (s) -> parseInt(s, minValue, key), properties); } - public static Setting longSetting(String key, long defaultValue, long minValue, boolean dynamic, Scope scope) { - return new Setting<>(key, (s) -> Long.toString(defaultValue), (s) -> parseLong(s, minValue, key), dynamic, scope); + public static Setting longSetting(String key, long defaultValue, long minValue, Property... properties) { + return new Setting<>(key, (s) -> Long.toString(defaultValue), (s) -> parseLong(s, minValue, key), properties); } - public static Setting simpleString(String key, boolean dynamic, Scope scope) { - return new Setting<>(key, "", Function.identity(), dynamic, scope); + public static Setting simpleString(String key, Property... properties) { + return new Setting<>(key, s -> "", Function.identity(), properties); } public static int parseInt(String s, int minValue, String key) { @@ -418,51 +493,58 @@ public class Setting extends ToXContentToBytes { return timeValue; } - public static Setting intSetting(String key, int defaultValue, boolean dynamic, Scope scope) { - return intSetting(key, defaultValue, Integer.MIN_VALUE, dynamic, scope); + public static Setting intSetting(String key, int defaultValue, Property... 
properties) { + return intSetting(key, defaultValue, Integer.MIN_VALUE, properties); } - public static Setting boolSetting(String key, boolean defaultValue, boolean dynamic, Scope scope) { - return new Setting<>(key, (s) -> Boolean.toString(defaultValue), Booleans::parseBooleanExact, dynamic, scope); + public static Setting boolSetting(String key, boolean defaultValue, Property... properties) { + return new Setting<>(key, (s) -> Boolean.toString(defaultValue), Booleans::parseBooleanExact, properties); } - public static Setting boolSetting(String key, Setting fallbackSetting, boolean dynamic, Scope scope) { - return new Setting<>(key, fallbackSetting, Booleans::parseBooleanExact, dynamic, scope); + public static Setting boolSetting(String key, Setting fallbackSetting, Property... properties) { + return new Setting<>(key, fallbackSetting, Booleans::parseBooleanExact, properties); } - public static Setting byteSizeSetting(String key, String percentage, boolean dynamic, Scope scope) { - return new Setting<>(key, (s) -> percentage, (s) -> MemorySizeValue.parseBytesSizeValueOrHeapRatio(s, key), dynamic, scope); + public static Setting byteSizeSetting(String key, String percentage, Property... properties) { + return new Setting<>(key, (s) -> percentage, (s) -> MemorySizeValue.parseBytesSizeValueOrHeapRatio(s, key), properties); } - public static Setting byteSizeSetting(String key, ByteSizeValue value, boolean dynamic, Scope scope) { - return byteSizeSetting(key, (s) -> value.toString(), dynamic, scope); + public static Setting byteSizeSetting(String key, ByteSizeValue value, Property... properties) { + return byteSizeSetting(key, (s) -> value.toString(), properties); } - public static Setting byteSizeSetting(String key, Setting fallbackSettings, boolean dynamic, Scope scope) { - return byteSizeSetting(key, fallbackSettings::getRaw, dynamic, scope); + public static Setting byteSizeSetting(String key, Setting fallbackSettings, + Property... properties) { + return byteSizeSetting(key, fallbackSettings::getRaw, properties); } - public static Setting byteSizeSetting(String key, Function defaultValue, boolean dynamic, Scope scope) { - return new Setting<>(key, defaultValue, (s) -> ByteSizeValue.parseBytesSizeValue(s, key), dynamic, scope); + public static Setting byteSizeSetting(String key, Function defaultValue, + Property... properties) { + return new Setting<>(key, defaultValue, (s) -> ByteSizeValue.parseBytesSizeValue(s, key), properties); } - public static Setting positiveTimeSetting(String key, TimeValue defaultValue, boolean dynamic, Scope scope) { - return timeSetting(key, defaultValue, TimeValue.timeValueMillis(0), dynamic, scope); + public static Setting positiveTimeSetting(String key, TimeValue defaultValue, Property... properties) { + return timeSetting(key, defaultValue, TimeValue.timeValueMillis(0), properties); } - public static Setting> listSetting(String key, List defaultStringValue, Function singleValueParser, boolean dynamic, Scope scope) { - return listSetting(key, (s) -> defaultStringValue, singleValueParser, dynamic, scope); + public static Setting> listSetting(String key, List defaultStringValue, Function singleValueParser, + Property... 
properties) { + return listSetting(key, (s) -> defaultStringValue, singleValueParser, properties); } - public static Setting> listSetting(String key, Setting> fallbackSetting, Function singleValueParser, boolean dynamic, Scope scope) { - return listSetting(key, (s) -> parseableStringToList(fallbackSetting.getRaw(s)), singleValueParser, dynamic, scope); + public static Setting> listSetting(String key, Setting> fallbackSetting, Function singleValueParser, + Property... properties) { + return listSetting(key, (s) -> parseableStringToList(fallbackSetting.getRaw(s)), singleValueParser, properties); } - public static Setting> listSetting(String key, Function> defaultStringValue, Function singleValueParser, boolean dynamic, Scope scope) { + public static Setting> listSetting(String key, Function> defaultStringValue, + Function singleValueParser, Property... properties) { Function> parser = (s) -> parseableStringToList(s).stream().map(singleValueParser).collect(Collectors.toList()); - return new Setting>(new ListKey(key), (s) -> arrayToParsableString(defaultStringValue.apply(s).toArray(Strings.EMPTY_ARRAY)), parser, dynamic, scope) { + return new Setting>(new ListKey(key), + (s) -> arrayToParsableString(defaultStringValue.apply(s).toArray(Strings.EMPTY_ARRAY)), parser, properties) { + private final Pattern pattern = Pattern.compile(Pattern.quote(key)+"(\\.\\d+)?"); @Override public String getRaw(Settings settings) { String[] array = settings.getAsArray(getKey(), null); @@ -509,11 +591,11 @@ public class Setting extends ToXContentToBytes { throw new ElasticsearchException(ex); } } - public static Setting groupSetting(String key, boolean dynamic, Scope scope) { - return groupSetting(key, dynamic, scope, (s) -> {}); + public static Setting groupSetting(String key, Property... properties) { + return groupSetting(key, (s) -> {}, properties); } - public static Setting groupSetting(String key, boolean dynamic, Scope scope, Consumer validator) { - return new Setting(new GroupKey(key), (s) -> "", (s) -> null, dynamic, scope) { + public static Setting groupSetting(String key, Consumer validator, Property... properties) { + return new Setting(new GroupKey(key), (s) -> "", (s) -> null, properties) { @Override public boolean isGroupSetting() { return true; @@ -592,30 +674,37 @@ public class Setting extends ToXContentToBytes { }; } - public static Setting timeSetting(String key, Function defaultValue, TimeValue minValue, boolean dynamic, Scope scope) { - return new Setting<>(key, defaultValue, (s) -> parseTimeValue(s, minValue, key), dynamic, scope); + public static Setting timeSetting(String key, Function defaultValue, TimeValue minValue, + Property... properties) { + return new Setting<>(key, defaultValue, (s) -> { + TimeValue timeValue = TimeValue.parseTimeValue(s, null, key); + if (timeValue.millis() < minValue.millis()) { + throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue); + } + return timeValue; + }, properties); } - public static Setting timeSetting(String key, TimeValue defaultValue, TimeValue minValue, boolean dynamic, Scope scope) { - return timeSetting(key, (s) -> defaultValue.getStringRep(), minValue, dynamic, scope); + public static Setting timeSetting(String key, TimeValue defaultValue, TimeValue minValue, Property... 
properties) { + return timeSetting(key, (s) -> defaultValue.getStringRep(), minValue, properties); } - public static Setting timeSetting(String key, TimeValue defaultValue, boolean dynamic, Scope scope) { - return new Setting<>(key, (s) -> defaultValue.toString(), (s) -> TimeValue.parseTimeValue(s, key), dynamic, scope); + public static Setting timeSetting(String key, TimeValue defaultValue, Property... properties) { + return new Setting<>(key, (s) -> defaultValue.toString(), (s) -> TimeValue.parseTimeValue(s, key), properties); } - public static Setting timeSetting(String key, Setting fallbackSetting, boolean dynamic, Scope scope) { - return new Setting<>(key, fallbackSetting::getRaw, (s) -> TimeValue.parseTimeValue(s, key), dynamic, scope); + public static Setting timeSetting(String key, Setting fallbackSetting, Property... properties) { + return new Setting<>(key, fallbackSetting::getRaw, (s) -> TimeValue.parseTimeValue(s, key), properties); } - public static Setting doubleSetting(String key, double defaultValue, double minValue, boolean dynamic, Scope scope) { + public static Setting doubleSetting(String key, double defaultValue, double minValue, Property... properties) { return new Setting<>(key, (s) -> Double.toString(defaultValue), (s) -> { final double d = Double.parseDouble(s); if (d < minValue) { throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue); } return d; - }, dynamic, scope); + }, properties); } @Override @@ -636,8 +725,9 @@ public class Setting extends ToXContentToBytes { * can easily be added with this setting. Yet, prefix key settings don't support updaters out of the box unless * {@link #getConcreteSetting(String)} is used to pull the updater. */ - public static Setting prefixKeySetting(String prefix, String defaultValue, Function parser, boolean dynamic, Scope scope) { - return affixKeySetting(AffixKey.withPrefix(prefix), (s) -> defaultValue, parser, dynamic, scope); + public static Setting prefixKeySetting(String prefix, String defaultValue, Function parser, + Property... properties) { + return affixKeySetting(AffixKey.withPrefix(prefix), (s) -> defaultValue, parser, properties); } /** @@ -645,16 +735,19 @@ public class Setting extends ToXContentToBytes { * storage.${backend}.enable=[true|false] can easily be added with this setting. Yet, adfix key settings don't support updaters * out of the box unless {@link #getConcreteSetting(String)} is used to pull the updater. */ - public static Setting adfixKeySetting(String prefix, String suffix, Function defaultValue, Function parser, boolean dynamic, Scope scope) { - return affixKeySetting(AffixKey.withAdfix(prefix, suffix), defaultValue, parser, dynamic, scope); + public static Setting adfixKeySetting(String prefix, String suffix, Function defaultValue, + Function parser, Property... properties) { + return affixKeySetting(AffixKey.withAdfix(prefix, suffix), defaultValue, parser, properties); } - public static Setting adfixKeySetting(String prefix, String suffix, String defaultValue, Function parser, boolean dynamic, Scope scope) { - return adfixKeySetting(prefix, suffix, (s) -> defaultValue, parser, dynamic, scope); + public static Setting adfixKeySetting(String prefix, String suffix, String defaultValue, Function parser, + Property... 
properties) { + return adfixKeySetting(prefix, suffix, (s) -> defaultValue, parser, properties); } - public static Setting affixKeySetting(AffixKey key, Function defaultValue, Function parser, boolean dynamic, Scope scope) { - return new Setting(key, defaultValue, parser, dynamic, scope) { + public static Setting affixKeySetting(AffixKey key, Function defaultValue, Function parser, + Property... properties) { + return new Setting(key, defaultValue, parser, properties) { @Override boolean isGroupSetting() { @@ -669,7 +762,7 @@ public class Setting extends ToXContentToBytes { @Override public Setting getConcreteSetting(String key) { if (match(key)) { - return new Setting<>(key, defaultValue, parser, dynamic, scope); + return new Setting<>(key, defaultValue, parser, properties); } else { throw new IllegalArgumentException("key [" + key + "] must match [" + getKey() + "] but didn't."); } diff --git a/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java b/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java index b06f53459c8..9fc2ee257a0 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java +++ b/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java @@ -35,7 +35,7 @@ public class SettingsModule extends AbstractModule { private final Settings settings; private final Set settingsFilterPattern = new HashSet<>(); - private final Map> clusterSettings = new HashMap<>(); + private final Map> nodeSettings = new HashMap<>(); private final Map> indexSettings = new HashMap<>(); private static final Predicate TRIBE_CLIENT_NODE_SETTINGS_PREDICATE = (s) -> s.startsWith("tribe.") && TribeService.TRIBE_SETTING_KEYS.contains(s) == false; @@ -52,7 +52,7 @@ public class SettingsModule extends AbstractModule { @Override protected void configure() { final IndexScopedSettings indexScopedSettings = new IndexScopedSettings(settings, new HashSet<>(this.indexSettings.values())); - final ClusterSettings clusterSettings = new ClusterSettings(settings, new HashSet<>(this.clusterSettings.values())); + final ClusterSettings clusterSettings = new ClusterSettings(settings, new HashSet<>(this.nodeSettings.values())); // by now we are fully configured, lets check node level settings for unregistered index settings indexScopedSettings.validate(settings.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE)); final Predicate acceptOnlyClusterSettings = TRIBE_CLIENT_NODE_SETTINGS_PREDICATE.or(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE).negate(); @@ -71,19 +71,28 @@ public class SettingsModule extends AbstractModule { * the setting during startup. */ public void registerSetting(Setting setting) { - switch (setting.getScope()) { - case CLUSTER: - if (clusterSettings.containsKey(setting.getKey())) { - throw new IllegalArgumentException("Cannot register setting [" + setting.getKey() + "] twice"); - } - clusterSettings.put(setting.getKey(), setting); - break; - case INDEX: - if (indexSettings.containsKey(setting.getKey())) { - throw new IllegalArgumentException("Cannot register setting [" + setting.getKey() + "] twice"); - } - indexSettings.put(setting.getKey(), setting); - break; + if (setting.isFiltered()) { + if (settingsFilterPattern.contains(setting.getKey()) == false) { + registerSettingsFilter(setting.getKey()); + } + } + + // We validate scope settings. We should have one and only one scope. 
+ if (setting.hasNodeScope() && setting.hasIndexScope()) { + throw new IllegalArgumentException("More than one scope has been added to the setting [" + setting.getKey() + "]"); + } + if (setting.hasNodeScope()) { + if (nodeSettings.containsKey(setting.getKey())) { + throw new IllegalArgumentException("Cannot register setting [" + setting.getKey() + "] twice"); + } + nodeSettings.put(setting.getKey(), setting); + } else if (setting.hasIndexScope()) { + if (indexSettings.containsKey(setting.getKey())) { + throw new IllegalArgumentException("Cannot register setting [" + setting.getKey() + "] twice"); + } + indexSettings.put(setting.getKey(), setting); + } else { + throw new IllegalArgumentException("No scope found for setting [" + setting.getKey() + "]"); } } @@ -101,21 +110,15 @@ public class SettingsModule extends AbstractModule { settingsFilterPattern.add(filter); } - public void registerSettingsFilterIfMissing(String filter) { - if (settingsFilterPattern.contains(filter) == false) { - registerSettingsFilter(filter); - } - } - /** * Check if a setting has already been registered */ public boolean exists(Setting setting) { - switch (setting.getScope()) { - case CLUSTER: - return clusterSettings.containsKey(setting.getKey()); - case INDEX: - return indexSettings.containsKey(setting.getKey()); + if (setting.hasNodeScope()) { + return nodeSettings.containsKey(setting.getKey()); + } + if (setting.hasIndexScope()) { + return indexSettings.containsKey(setting.getKey()); } throw new IllegalArgumentException("setting scope is unknown. This should never happen!"); } diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java index 10b1412425c..df1288d4fd2 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java @@ -20,6 +20,7 @@ package org.elasticsearch.common.util.concurrent; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import java.util.Arrays; @@ -41,7 +42,8 @@ public class EsExecutors { * Settings key to manually set the number of available processors. * This is used to adjust thread pools sizes etc. per node. */ - public static final Setting PROCESSORS_SETTING = Setting.intSetting("processors", Math.min(32, Runtime.getRuntime().availableProcessors()), 1, false, Setting.Scope.CLUSTER) ; + public static final Setting PROCESSORS_SETTING = + Setting.intSetting("processors", Math.min(32, Runtime.getRuntime().availableProcessors()), 1, Property.NodeScope); /** * Returns the number of processors available but at most 32. 
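To make the migration pattern concrete before the remaining files: under the new API a setting declares dynamism and scope as trailing Property values instead of the old (boolean dynamic, Scope scope) pair, and SettingsModule now enforces that exactly one scope is present at registration time. A minimal consumer-side sketch, assuming the single-argument SettingsModule constructor; the example.* keys and the class name are illustrative only:

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsModule;

public class SettingRegistrationSketch {
    // Old style: Setting.boolSetting("example.enabled", true, /* dynamic */ false, Scope.CLUSTER)
    // New style: the scope (and any other flags) are trailing Property values.
    static final Setting<Boolean> ENABLED =
            Setting.boolSetting("example.enabled", true, Property.NodeScope);

    public static void main(String[] args) {
        SettingsModule module = new SettingsModule(Settings.EMPTY); // assumed constructor
        module.registerSetting(ENABLED); // accepted: exactly one scope

        // Rejected with "No scope found for setting [example.none]":
        // module.registerSetting(Setting.boolSetting("example.none", true));

        // Rejected with "More than one scope has been added to the setting [example.both]":
        // module.registerSetting(Setting.boolSetting("example.both", true,
        //         Property.NodeScope, Property.IndexScope));
    }
}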
diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java index 1928392fe41..2ac6082e85d 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java @@ -19,11 +19,11 @@ package org.elasticsearch.common.util.concurrent; import org.apache.lucene.util.CloseableThreadLocal; -import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import java.io.Closeable; @@ -63,7 +63,7 @@ import java.util.concurrent.atomic.AtomicBoolean; public final class ThreadContext implements Closeable, Writeable{ public static final String PREFIX = "request.headers"; - public static final Setting DEFAULT_HEADERS_SETTING = Setting.groupSetting(PREFIX + ".", false, Setting.Scope.CLUSTER); + public static final Setting DEFAULT_HEADERS_SETTING = Setting.groupSetting(PREFIX + ".", Property.NodeScope); private final Map defaultHeader; private static final ThreadContextStruct DEFAULT_CONTEXT = new ThreadContextStruct(Collections.emptyMap()); private final ContextThreadLocal threadLocal; diff --git a/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java index e08757a3f2a..4076b880d6f 100644 --- a/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java +++ b/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java @@ -23,6 +23,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.multibindings.Multibinder; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.ExtensionPoint; import org.elasticsearch.discovery.local.LocalDiscovery; @@ -45,10 +46,11 @@ import java.util.function.Function; */ public class DiscoveryModule extends AbstractModule { - public static final Setting DISCOVERY_TYPE_SETTING = new Setting<>("discovery.type", - settings -> DiscoveryNode.localNode(settings) ? "local" : "zen", Function.identity(), false, Setting.Scope.CLUSTER); - public static final Setting ZEN_MASTER_SERVICE_TYPE_SETTING = new Setting<>("discovery.zen.masterservice.type", - "zen", Function.identity(), false, Setting.Scope.CLUSTER); + public static final Setting DISCOVERY_TYPE_SETTING = + new Setting<>("discovery.type", settings -> DiscoveryNode.localNode(settings) ? 
"local" : "zen", Function.identity(), + Property.NodeScope); + public static final Setting ZEN_MASTER_SERVICE_TYPE_SETTING = + new Setting<>("discovery.zen.masterservice.type", "zen", Function.identity(), Property.NodeScope); private final Settings settings; private final Map>> unicastHostProviders = new HashMap<>(); diff --git a/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java b/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java index b899f0a8afc..ca7ab342cd5 100644 --- a/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java +++ b/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java @@ -24,6 +24,7 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.rest.RestStatus; @@ -42,16 +43,25 @@ public class DiscoverySettings extends AbstractComponent { * sets the timeout for a complete publishing cycle, including both sending and committing. the master * will continue to process the next cluster state update after this time has elapsed **/ - public static final Setting PUBLISH_TIMEOUT_SETTING = Setting.positiveTimeSetting("discovery.zen.publish_timeout", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER); + public static final Setting PUBLISH_TIMEOUT_SETTING = + Setting.positiveTimeSetting("discovery.zen.publish_timeout", TimeValue.timeValueSeconds(30), + Property.Dynamic, Property.NodeScope); /** * sets the timeout for receiving enough acks for a specific cluster state and committing it. failing * to receive responses within this window will cause the cluster state change to be rejected. 
*/ - public static final Setting COMMIT_TIMEOUT_SETTING = new Setting<>("discovery.zen.commit_timeout", (s) -> PUBLISH_TIMEOUT_SETTING.getRaw(s), (s) -> TimeValue.parseTimeValue(s, TimeValue.timeValueSeconds(30), "discovery.zen.commit_timeout"), true, Setting.Scope.CLUSTER); - public static final Setting NO_MASTER_BLOCK_SETTING = new Setting<>("discovery.zen.no_master_block", "write", DiscoverySettings::parseNoMasterBlock, true, Setting.Scope.CLUSTER); - public static final Setting PUBLISH_DIFF_ENABLE_SETTING = Setting.boolSetting("discovery.zen.publish_diff.enable", true, true, Setting.Scope.CLUSTER); - public static final Setting INITIAL_STATE_TIMEOUT_SETTING = Setting.positiveTimeSetting("discovery.initial_state_timeout", TimeValue.timeValueSeconds(30), false, Setting.Scope.CLUSTER); + public static final Setting COMMIT_TIMEOUT_SETTING = + new Setting<>("discovery.zen.commit_timeout", (s) -> PUBLISH_TIMEOUT_SETTING.getRaw(s), + (s) -> TimeValue.parseTimeValue(s, TimeValue.timeValueSeconds(30), "discovery.zen.commit_timeout"), + Property.Dynamic, Property.NodeScope); + public static final Setting NO_MASTER_BLOCK_SETTING = + new Setting<>("discovery.zen.no_master_block", "write", DiscoverySettings::parseNoMasterBlock, + Property.Dynamic, Property.NodeScope); + public static final Setting PUBLISH_DIFF_ENABLE_SETTING = + Setting.boolSetting("discovery.zen.publish_diff.enable", true, Property.Dynamic, Property.NodeScope); + public static final Setting INITIAL_STATE_TIMEOUT_SETTING = + Setting.positiveTimeSetting("discovery.initial_state_timeout", TimeValue.timeValueSeconds(30), Property.NodeScope); private volatile ClusterBlock noMasterBlock; private volatile TimeValue publishTimeout; diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index c0dd78b4e5f..a19f4fa4af1 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -46,6 +46,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.Discovery; @@ -86,17 +87,28 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; */ public class ZenDiscovery extends AbstractLifecycleComponent implements Discovery, PingContextProvider { - public final static Setting PING_TIMEOUT_SETTING = Setting.positiveTimeSetting("discovery.zen.ping_timeout", timeValueSeconds(3), false, Setting.Scope.CLUSTER); - public final static Setting JOIN_TIMEOUT_SETTING = Setting.timeSetting("discovery.zen.join_timeout", - settings -> TimeValue.timeValueMillis(PING_TIMEOUT_SETTING.get(settings).millis() * 20).toString(), TimeValue.timeValueMillis(0), false, Setting.Scope.CLUSTER); - public final static Setting JOIN_RETRY_ATTEMPTS_SETTING = Setting.intSetting("discovery.zen.join_retry_attempts", 3, 1, false, Setting.Scope.CLUSTER); - public final static Setting JOIN_RETRY_DELAY_SETTING = Setting.positiveTimeSetting("discovery.zen.join_retry_delay", TimeValue.timeValueMillis(100), false, Setting.Scope.CLUSTER); - public final static Setting MAX_PINGS_FROM_ANOTHER_MASTER_SETTING = 
Setting.intSetting("discovery.zen.max_pings_from_another_master", 3, 1, false, Setting.Scope.CLUSTER); - public final static Setting SEND_LEAVE_REQUEST_SETTING = Setting.boolSetting("discovery.zen.send_leave_request", true, false, Setting.Scope.CLUSTER); - public final static Setting MASTER_ELECTION_FILTER_CLIENT_SETTING = Setting.boolSetting("discovery.zen.master_election.filter_client", true, false, Setting.Scope.CLUSTER); - public final static Setting MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING = Setting.timeSetting("discovery.zen.master_election.wait_for_joins_timeout", - settings -> TimeValue.timeValueMillis(JOIN_TIMEOUT_SETTING.get(settings).millis() / 2).toString(), TimeValue.timeValueMillis(0), false, Setting.Scope.CLUSTER); - public final static Setting MASTER_ELECTION_FILTER_DATA_SETTING = Setting.boolSetting("discovery.zen.master_election.filter_data", false, false, Setting.Scope.CLUSTER); + public final static Setting PING_TIMEOUT_SETTING = + Setting.positiveTimeSetting("discovery.zen.ping_timeout", timeValueSeconds(3), Property.NodeScope); + public final static Setting JOIN_TIMEOUT_SETTING = + Setting.timeSetting("discovery.zen.join_timeout", + settings -> TimeValue.timeValueMillis(PING_TIMEOUT_SETTING.get(settings).millis() * 20).toString(), + TimeValue.timeValueMillis(0), Property.NodeScope); + public final static Setting JOIN_RETRY_ATTEMPTS_SETTING = + Setting.intSetting("discovery.zen.join_retry_attempts", 3, 1, Property.NodeScope); + public final static Setting JOIN_RETRY_DELAY_SETTING = + Setting.positiveTimeSetting("discovery.zen.join_retry_delay", TimeValue.timeValueMillis(100), Property.NodeScope); + public final static Setting MAX_PINGS_FROM_ANOTHER_MASTER_SETTING = + Setting.intSetting("discovery.zen.max_pings_from_another_master", 3, 1, Property.NodeScope); + public final static Setting SEND_LEAVE_REQUEST_SETTING = + Setting.boolSetting("discovery.zen.send_leave_request", true, Property.NodeScope); + public final static Setting MASTER_ELECTION_FILTER_CLIENT_SETTING = + Setting.boolSetting("discovery.zen.master_election.filter_client", true, Property.NodeScope); + public final static Setting MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING = + Setting.timeSetting("discovery.zen.master_election.wait_for_joins_timeout", + settings -> TimeValue.timeValueMillis(JOIN_TIMEOUT_SETTING.get(settings).millis() / 2).toString(), TimeValue.timeValueMillis(0), + Property.NodeScope); + public final static Setting MASTER_ELECTION_FILTER_DATA_SETTING = + Setting.boolSetting("discovery.zen.master_election.filter_data", false, Property.NodeScope); public static final String DISCOVERY_REJOIN_ACTION_NAME = "internal:discovery/zen/rejoin"; @@ -823,7 +835,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen return null; } if (logger.isTraceEnabled()) { - StringBuilder sb = new StringBuilder("full ping responses:"); + StringBuilder sb = new StringBuilder(); if (fullPingResponses.length == 0) { sb.append(" {none}"); } else { @@ -831,7 +843,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen sb.append("\n\t--> ").append(pingResponse); } } - logger.trace(sb.toString()); + logger.trace("full ping responses:{}", sb); } // filter responses @@ -848,7 +860,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen } if (logger.isDebugEnabled()) { - StringBuilder sb = new StringBuilder("filtered ping responses: (filter_client[").append(masterElectionFilterClientNodes).append("], 
filter_data[").append(masterElectionFilterDataNodes).append("])"); + StringBuilder sb = new StringBuilder(); if (pingResponses.isEmpty()) { sb.append(" {none}"); } else { @@ -856,7 +868,8 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen sb.append("\n\t--> ").append(pingResponse); } } - logger.debug(sb.toString()); + logger.debug("filtered ping responses: (filter_client[{}], filter_data[{}]){}", masterElectionFilterClientNodes, + masterElectionFilterDataNodes, sb); } final DiscoveryNode localNode = clusterService.localNode(); @@ -918,7 +931,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen // *** called from within an cluster state update task *** // assert Thread.currentThread().getName().contains(InternalClusterService.UPDATE_THREAD_NAME); - logger.warn(reason + ", current nodes: {}", clusterState.nodes()); + logger.warn("{}, current nodes: {}", reason, clusterState.nodes()); nodesFD.stop(); masterFD.stop(reason); diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java b/core/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java index 1482fb92a22..a3da8be5a94 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; @@ -40,7 +41,8 @@ import java.util.List; */ public class ElectMasterService extends AbstractComponent { - public static final Setting DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING = Setting.intSetting("discovery.zen.minimum_master_nodes", -1, true, Setting.Scope.CLUSTER); + public static final Setting DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING = + Setting.intSetting("discovery.zen.minimum_master_nodes", -1, Property.Dynamic, Property.NodeScope); // This is the minimum version a master needs to be on, otherwise it gets ignored // This is based on the minimum compatible version of the current version this node is on diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/fd/FaultDetection.java b/core/src/main/java/org/elasticsearch/discovery/zen/fd/FaultDetection.java index 62b0250315c..1cfd46634a5 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/fd/FaultDetection.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/fd/FaultDetection.java @@ -22,7 +22,7 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Scope; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.threadpool.ThreadPool; @@ -37,11 +37,16 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; */ public abstract class FaultDetection extends AbstractComponent { - public static final Setting CONNECT_ON_NETWORK_DISCONNECT_SETTING = Setting.boolSetting("discovery.zen.fd.connect_on_network_disconnect", false, false, 
Scope.CLUSTER); - public static final Setting PING_INTERVAL_SETTING = Setting.positiveTimeSetting("discovery.zen.fd.ping_interval", timeValueSeconds(1), false, Scope.CLUSTER); - public static final Setting PING_TIMEOUT_SETTING = Setting.timeSetting("discovery.zen.fd.ping_timeout", timeValueSeconds(30), false, Scope.CLUSTER); - public static final Setting PING_RETRIES_SETTING = Setting.intSetting("discovery.zen.fd.ping_retries", 3, false, Scope.CLUSTER); - public static final Setting REGISTER_CONNECTION_LISTENER_SETTING = Setting.boolSetting("discovery.zen.fd.register_connection_listener", true, false, Scope.CLUSTER); + public static final Setting CONNECT_ON_NETWORK_DISCONNECT_SETTING = + Setting.boolSetting("discovery.zen.fd.connect_on_network_disconnect", false, Property.NodeScope); + public static final Setting PING_INTERVAL_SETTING = + Setting.positiveTimeSetting("discovery.zen.fd.ping_interval", timeValueSeconds(1), Property.NodeScope); + public static final Setting PING_TIMEOUT_SETTING = + Setting.timeSetting("discovery.zen.fd.ping_timeout", timeValueSeconds(30), Property.NodeScope); + public static final Setting PING_RETRIES_SETTING = + Setting.intSetting("discovery.zen.fd.ping_retries", 3, Property.NodeScope); + public static final Setting REGISTER_CONNECTION_LISTENER_SETTING = + Setting.boolSetting("discovery.zen.fd.register_connection_listener", true, Property.NodeScope); protected final ThreadPool threadPool; protected final ClusterName clusterName; diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java b/core/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java index 427abca8d85..0e9b81ad1fc 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; @@ -86,8 +87,11 @@ import static org.elasticsearch.discovery.zen.ping.ZenPing.PingResponse.readPing public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPing { public static final String ACTION_NAME = "internal:discovery/zen/unicast"; - public static final Setting> DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING = Setting.listSetting("discovery.zen.ping.unicast.hosts", Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER); - public static final Setting DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING = Setting.intSetting("discovery.zen.ping.unicast.concurrent_connects", 10, 0, false, Setting.Scope.CLUSTER); + public static final Setting> DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING = + Setting.listSetting("discovery.zen.ping.unicast.hosts", Collections.emptyList(), Function.identity(), + Property.NodeScope); + public static final Setting DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING = + Setting.intSetting("discovery.zen.ping.unicast.concurrent_connects", 10, 0, Property.NodeScope); // these limits are per-address public static final int LIMIT_FOREIGN_PORTS_COUNT = 1; diff --git a/core/src/main/java/org/elasticsearch/env/Environment.java 
b/core/src/main/java/org/elasticsearch/env/Environment.java index 1f8cffc97f3..e022ce6ad2f 100644 --- a/core/src/main/java/org/elasticsearch/env/Environment.java +++ b/core/src/main/java/org/elasticsearch/env/Environment.java @@ -23,6 +23,7 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import java.io.IOException; @@ -46,15 +47,17 @@ import static org.elasticsearch.common.Strings.cleanPath; // TODO: move PathUtils to be package-private here instead of // public+forbidden api! public class Environment { - public static final Setting PATH_HOME_SETTING = Setting.simpleString("path.home", false, Setting.Scope.CLUSTER); - public static final Setting PATH_CONF_SETTING = Setting.simpleString("path.conf", false, Setting.Scope.CLUSTER); - public static final Setting PATH_SCRIPTS_SETTING = Setting.simpleString("path.scripts", false, Setting.Scope.CLUSTER); - public static final Setting> PATH_DATA_SETTING = Setting.listSetting("path.data", Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER); - public static final Setting PATH_LOGS_SETTING = Setting.simpleString("path.logs", false, Setting.Scope.CLUSTER); - public static final Setting PATH_PLUGINS_SETTING = Setting.simpleString("path.plugins", false, Setting.Scope.CLUSTER); - public static final Setting> PATH_REPO_SETTING = Setting.listSetting("path.repo", Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER); - public static final Setting PATH_SHARED_DATA_SETTING = Setting.simpleString("path.shared_data", false, Setting.Scope.CLUSTER); - public static final Setting PIDFILE_SETTING = Setting.simpleString("pidfile", false, Setting.Scope.CLUSTER); + public static final Setting PATH_HOME_SETTING = Setting.simpleString("path.home", Property.NodeScope); + public static final Setting PATH_CONF_SETTING = Setting.simpleString("path.conf", Property.NodeScope); + public static final Setting PATH_SCRIPTS_SETTING = Setting.simpleString("path.scripts", Property.NodeScope); + public static final Setting> PATH_DATA_SETTING = + Setting.listSetting("path.data", Collections.emptyList(), Function.identity(), Property.NodeScope); + public static final Setting PATH_LOGS_SETTING = Setting.simpleString("path.logs", Property.NodeScope); + public static final Setting PATH_PLUGINS_SETTING = Setting.simpleString("path.plugins", Property.NodeScope); + public static final Setting> PATH_REPO_SETTING = + Setting.listSetting("path.repo", Collections.emptyList(), Function.identity(), Property.NodeScope); + public static final Setting PATH_SHARED_DATA_SETTING = Setting.simpleString("path.shared_data", Property.NodeScope); + public static final Setting PIDFILE_SETTING = Setting.simpleString("pidfile", Property.NodeScope); private final Settings settings; diff --git a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java index 0eec5c5765e..0b1e3ebf950 100644 --- a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -36,7 +36,7 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.settings.Setting; 
-import org.elasticsearch.common.settings.Setting.Scope; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; @@ -49,7 +49,6 @@ import org.elasticsearch.index.store.FsDirectoryService; import org.elasticsearch.monitor.fs.FsInfo; import org.elasticsearch.monitor.fs.FsProbe; import org.elasticsearch.monitor.jvm.JvmInfo; -import org.elasticsearch.monitor.process.ProcessProbe; import java.io.Closeable; import java.io.IOException; @@ -137,20 +136,20 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl /** * Maximum number of data nodes that should run in an environment. */ - public static final Setting MAX_LOCAL_STORAGE_NODES_SETTING = Setting.intSetting("node.max_local_storage_nodes", 50, 1, false, - Scope.CLUSTER); + public static final Setting MAX_LOCAL_STORAGE_NODES_SETTING = Setting.intSetting("node.max_local_storage_nodes", 50, 1, + Property.NodeScope); /** * If true automatically append node id to custom data paths. */ - public static final Setting ADD_NODE_ID_TO_CUSTOM_PATH = Setting.boolSetting("node.add_id_to_custom_path", true, false, - Scope.CLUSTER); + public static final Setting ADD_NODE_ID_TO_CUSTOM_PATH = + Setting.boolSetting("node.add_id_to_custom_path", true, Property.NodeScope); /** * If true the [verbose] SegmentInfos.infoStream logging is sent to System.out. */ - public static final Setting ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING = Setting - .boolSetting("node.enable_lucene_segment_infos_trace", false, false, Scope.CLUSTER); + public static final Setting ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING = + Setting.boolSetting("node.enable_lucene_segment_infos_trace", false, Property.NodeScope); public static final String NODES_FOLDER = "nodes"; public static final String INDICES_FOLDER = "indices"; @@ -225,7 +224,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl maybeLogPathDetails(); maybeLogHeapDetails(); - + applySegmentInfosTrace(settings); assertCanWrite(); success = true; @@ -250,7 +249,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl // We do some I/O in here, so skip this if DEBUG/INFO are not enabled: if (logger.isDebugEnabled()) { // Log one line per path.data: - StringBuilder sb = new StringBuilder("node data locations details:"); + StringBuilder sb = new StringBuilder(); for (NodePath nodePath : nodePaths) { sb.append('\n').append(" -> ").append(nodePath.path.toAbsolutePath()); @@ -278,7 +277,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl .append(fsPath.getType()) .append(']'); } - logger.debug(sb.toString()); + logger.debug("node data locations details:{}", sb); } else if (logger.isInfoEnabled()) { FsInfo.Path totFSPath = new FsInfo.Path(); Set allTypes = new HashSet<>(); @@ -306,14 +305,8 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl } // Just log a 1-line summary: - logger.info(String.format(Locale.ROOT, - "using [%d] data paths, mounts [%s], net usable_space [%s], net total_space [%s], spins? [%s], types [%s]", - nodePaths.length, - allMounts, - totFSPath.getAvailable(), - totFSPath.getTotal(), - toString(allSpins), - toString(allTypes))); + logger.info("using [{}] data paths, mounts [{}], net usable_space [{}], net total_space [{}], spins? 
[{}], types [{}]", + nodePaths.length, allMounts, totFSPath.getAvailable(), totFSPath.getTotal(), toString(allSpins), toString(allTypes)); } } diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java index e90cb750cf5..15277d6fb4f 100644 --- a/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java +++ b/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java @@ -202,7 +202,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL try (DirectoryStream stream = Files.newDirectoryStream(stateLocation)) { for (Path stateFile : stream) { if (logger.isTraceEnabled()) { - logger.trace("[upgrade]: processing [" + stateFile.getFileName() + "]"); + logger.trace("[upgrade]: processing [{}]", stateFile.getFileName()); } final String name = stateFile.getFileName().toString(); if (name.startsWith("metadata-")) { diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayService.java b/core/src/main/java/org/elasticsearch/gateway/GatewayService.java index 384539b4c63..6d85fb2f41d 100644 --- a/core/src/main/java/org/elasticsearch/gateway/GatewayService.java +++ b/core/src/main/java/org/elasticsearch/gateway/GatewayService.java @@ -37,6 +37,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; @@ -52,20 +53,20 @@ import java.util.concurrent.atomic.AtomicBoolean; */ public class GatewayService extends AbstractLifecycleComponent implements ClusterStateListener { - public static final Setting EXPECTED_NODES_SETTING = Setting.intSetting( - "gateway.expected_nodes", -1, -1, false, Setting.Scope.CLUSTER); - public static final Setting EXPECTED_DATA_NODES_SETTING = Setting.intSetting( - "gateway.expected_data_nodes", -1, -1, false, Setting.Scope.CLUSTER); - public static final Setting EXPECTED_MASTER_NODES_SETTING = Setting.intSetting( - "gateway.expected_master_nodes", -1, -1, false, Setting.Scope.CLUSTER); - public static final Setting RECOVER_AFTER_TIME_SETTING = Setting.positiveTimeSetting( - "gateway.recover_after_time", TimeValue.timeValueMillis(0), false, Setting.Scope.CLUSTER); - public static final Setting RECOVER_AFTER_NODES_SETTING = Setting.intSetting( - "gateway.recover_after_nodes", -1, -1, false, Setting.Scope.CLUSTER); - public static final Setting RECOVER_AFTER_DATA_NODES_SETTING = Setting.intSetting( - "gateway.recover_after_data_nodes", -1, -1, false, Setting.Scope.CLUSTER); - public static final Setting RECOVER_AFTER_MASTER_NODES_SETTING = Setting.intSetting( - "gateway.recover_after_master_nodes", 0, 0, false, Setting.Scope.CLUSTER); + public static final Setting EXPECTED_NODES_SETTING = + Setting.intSetting("gateway.expected_nodes", -1, -1, Property.NodeScope); + public static final Setting EXPECTED_DATA_NODES_SETTING = + Setting.intSetting("gateway.expected_data_nodes", -1, -1, Property.NodeScope); + public static final Setting EXPECTED_MASTER_NODES_SETTING = + Setting.intSetting("gateway.expected_master_nodes", -1, -1, Property.NodeScope); + public static final Setting RECOVER_AFTER_TIME_SETTING = + 
Setting.positiveTimeSetting("gateway.recover_after_time", TimeValue.timeValueMillis(0), Property.NodeScope); + public static final Setting RECOVER_AFTER_NODES_SETTING = + Setting.intSetting("gateway.recover_after_nodes", -1, -1, Property.NodeScope); + public static final Setting RECOVER_AFTER_DATA_NODES_SETTING = + Setting.intSetting("gateway.recover_after_data_nodes", -1, -1, Property.NodeScope); + public static final Setting RECOVER_AFTER_MASTER_NODES_SETTING = + Setting.intSetting("gateway.recover_after_master_nodes", 0, 0, Property.NodeScope); public static final ClusterBlock STATE_NOT_RECOVERED_BLOCK = new ClusterBlock(1, "state not recovered / initialized", true, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL); @@ -161,11 +162,14 @@ public class GatewayService extends AbstractLifecycleComponent i if (state.nodes().masterNodeId() == null) { logger.debug("not recovering from gateway, no master elected yet"); } else if (recoverAfterNodes != -1 && (nodes.masterAndDataNodes().size()) < recoverAfterNodes) { - logger.debug("not recovering from gateway, nodes_size (data+master) [" + nodes.masterAndDataNodes().size() + "] < recover_after_nodes [" + recoverAfterNodes + "]"); + logger.debug("not recovering from gateway, nodes_size (data+master) [{}] < recover_after_nodes [{}]", + nodes.masterAndDataNodes().size(), recoverAfterNodes); } else if (recoverAfterDataNodes != -1 && nodes.dataNodes().size() < recoverAfterDataNodes) { - logger.debug("not recovering from gateway, nodes_size (data) [" + nodes.dataNodes().size() + "] < recover_after_data_nodes [" + recoverAfterDataNodes + "]"); + logger.debug("not recovering from gateway, nodes_size (data) [{}] < recover_after_data_nodes [{}]", + nodes.dataNodes().size(), recoverAfterDataNodes); } else if (recoverAfterMasterNodes != -1 && nodes.masterNodes().size() < recoverAfterMasterNodes) { - logger.debug("not recovering from gateway, nodes_size (master) [" + nodes.masterNodes().size() + "] < recover_after_master_nodes [" + recoverAfterMasterNodes + "]"); + logger.debug("not recovering from gateway, nodes_size (master) [{}] < recover_after_master_nodes [{}]", + nodes.masterNodes().size(), recoverAfterMasterNodes); } else { boolean enforceRecoverAfterTime; String reason; diff --git a/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java b/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java index ed61aa2c1fd..5f6e50d6fc9 100644 --- a/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java @@ -31,6 +31,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.gateway.TransportNodesListGatewayStartedShards.NodeGatewayStartedShards; import org.elasticsearch.index.shard.ShardStateMetaData; @@ -67,9 +68,13 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { } }; - public static final Setting NODE_INITIAL_SHARDS_SETTING = new Setting<>("gateway.initial_shards", (settings) -> settings.get("gateway.local.initial_shards", "quorum"), INITIAL_SHARDS_PARSER, true, Setting.Scope.CLUSTER); + public static final Setting NODE_INITIAL_SHARDS_SETTING = + new 
Setting<>("gateway.initial_shards", (settings) -> settings.get("gateway.local.initial_shards", "quorum"), INITIAL_SHARDS_PARSER, + Property.Dynamic, Property.NodeScope); @Deprecated - public static final Setting INDEX_RECOVERY_INITIAL_SHARDS_SETTING = new Setting<>("index.recovery.initial_shards", (settings) -> NODE_INITIAL_SHARDS_SETTING.get(settings) , INITIAL_SHARDS_PARSER, true, Setting.Scope.INDEX); + public static final Setting INDEX_RECOVERY_INITIAL_SHARDS_SETTING = + new Setting<>("index.recovery.initial_shards", (settings) -> NODE_INITIAL_SHARDS_SETTING.get(settings) , INITIAL_SHARDS_PARSER, + Property.Dynamic, Property.IndexScope); public PrimaryShardAllocator(Settings settings) { super(settings); diff --git a/core/src/main/java/org/elasticsearch/http/HttpTransportSettings.java b/core/src/main/java/org/elasticsearch/http/HttpTransportSettings.java index 6c91df079b9..48af1c83965 100644 --- a/core/src/main/java/org/elasticsearch/http/HttpTransportSettings.java +++ b/core/src/main/java/org/elasticsearch/http/HttpTransportSettings.java @@ -20,42 +20,64 @@ package org.elasticsearch.http; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Scope; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.transport.PortsRange; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import java.util.List; +import java.util.function.Function; import static java.util.Collections.emptyList; import static org.elasticsearch.common.settings.Setting.listSetting; public final class HttpTransportSettings { - public static final Setting SETTING_CORS_ENABLED = Setting.boolSetting("http.cors.enabled", false, false, Scope.CLUSTER); - public static final Setting SETTING_CORS_ALLOW_ORIGIN = new Setting("http.cors.allow-origin", "", (value) -> value, false, Scope.CLUSTER); - public static final Setting SETTING_CORS_MAX_AGE = Setting.intSetting("http.cors.max-age", 1728000, false, Scope.CLUSTER); - public static final Setting SETTING_CORS_ALLOW_METHODS = new Setting("http.cors.allow-methods", "OPTIONS, HEAD, GET, POST, PUT, DELETE", (value) -> value, false, Scope.CLUSTER); - public static final Setting SETTING_CORS_ALLOW_HEADERS = new Setting("http.cors.allow-headers", "X-Requested-With, Content-Type, Content-Length", (value) -> value, false, Scope.CLUSTER); - public static final Setting SETTING_CORS_ALLOW_CREDENTIALS = Setting.boolSetting("http.cors.allow-credentials", false, false, Scope.CLUSTER); - public static final Setting SETTING_PIPELINING = Setting.boolSetting("http.pipelining", true, false, Scope.CLUSTER); - public static final Setting SETTING_PIPELINING_MAX_EVENTS = Setting.intSetting("http.pipelining.max_events", 10000, false, Scope.CLUSTER); - public static final Setting SETTING_HTTP_COMPRESSION = Setting.boolSetting("http.compression", false, false, Scope.CLUSTER); - public static final Setting SETTING_HTTP_COMPRESSION_LEVEL = Setting.intSetting("http.compression_level", 6, false, Scope.CLUSTER); - public static final Setting> SETTING_HTTP_HOST = listSetting("http.host", emptyList(), s -> s, false, Scope.CLUSTER); - public static final Setting> SETTING_HTTP_PUBLISH_HOST = listSetting("http.publish_host", SETTING_HTTP_HOST, s -> s, false, Scope.CLUSTER); - public static final Setting> SETTING_HTTP_BIND_HOST = listSetting("http.bind_host", SETTING_HTTP_HOST, s -> s, false, Scope.CLUSTER); + public static final Setting SETTING_CORS_ENABLED = + 
Setting.boolSetting("http.cors.enabled", false, Property.NodeScope); + public static final Setting SETTING_CORS_ALLOW_ORIGIN = + new Setting("http.cors.allow-origin", "", (value) -> value, Property.NodeScope); + public static final Setting SETTING_CORS_MAX_AGE = + Setting.intSetting("http.cors.max-age", 1728000, Property.NodeScope); + public static final Setting SETTING_CORS_ALLOW_METHODS = + new Setting("http.cors.allow-methods", "OPTIONS, HEAD, GET, POST, PUT, DELETE", (value) -> value, Property.NodeScope); + public static final Setting SETTING_CORS_ALLOW_HEADERS = + new Setting("http.cors.allow-headers", "X-Requested-With, Content-Type, Content-Length", (value) -> value, Property.NodeScope); + public static final Setting SETTING_CORS_ALLOW_CREDENTIALS = + Setting.boolSetting("http.cors.allow-credentials", false, Property.NodeScope); + public static final Setting SETTING_PIPELINING = + Setting.boolSetting("http.pipelining", true, Property.NodeScope); + public static final Setting SETTING_PIPELINING_MAX_EVENTS = + Setting.intSetting("http.pipelining.max_events", 10000, Property.NodeScope); + public static final Setting SETTING_HTTP_COMPRESSION = + Setting.boolSetting("http.compression", false, Property.NodeScope); + public static final Setting SETTING_HTTP_COMPRESSION_LEVEL = + Setting.intSetting("http.compression_level", 6, Property.NodeScope); + public static final Setting> SETTING_HTTP_HOST = + listSetting("http.host", emptyList(), Function.identity(), Property.NodeScope); + public static final Setting> SETTING_HTTP_PUBLISH_HOST = + listSetting("http.publish_host", SETTING_HTTP_HOST, Function.identity(), Property.NodeScope); + public static final Setting> SETTING_HTTP_BIND_HOST = + listSetting("http.bind_host", SETTING_HTTP_HOST, Function.identity(), Property.NodeScope); - public static final Setting SETTING_HTTP_PORT = new Setting("http.port", "9200-9300", PortsRange::new, false, Scope.CLUSTER); - public static final Setting SETTING_HTTP_PUBLISH_PORT = Setting.intSetting("http.publish_port", -1, -1, false, Scope.CLUSTER); - public static final Setting SETTING_HTTP_DETAILED_ERRORS_ENABLED = Setting.boolSetting("http.detailed_errors.enabled", true, false, Scope.CLUSTER); - public static final Setting SETTING_HTTP_MAX_CONTENT_LENGTH = Setting.byteSizeSetting("http.max_content_length", new ByteSizeValue(100, ByteSizeUnit.MB), false, Scope.CLUSTER) ; - public static final Setting SETTING_HTTP_MAX_CHUNK_SIZE = Setting.byteSizeSetting("http.max_chunk_size", new ByteSizeValue(8, ByteSizeUnit.KB), false, Scope.CLUSTER) ; - public static final Setting SETTING_HTTP_MAX_HEADER_SIZE = Setting.byteSizeSetting("http.max_header_size", new ByteSizeValue(8, ByteSizeUnit.KB), false, Scope.CLUSTER) ; - public static final Setting SETTING_HTTP_MAX_INITIAL_LINE_LENGTH = Setting.byteSizeSetting("http.max_initial_line_length", new ByteSizeValue(4, ByteSizeUnit.KB), false, Scope.CLUSTER) ; + public static final Setting SETTING_HTTP_PORT = + new Setting("http.port", "9200-9300", PortsRange::new, Property.NodeScope); + public static final Setting SETTING_HTTP_PUBLISH_PORT = + Setting.intSetting("http.publish_port", -1, -1, Property.NodeScope); + public static final Setting SETTING_HTTP_DETAILED_ERRORS_ENABLED = + Setting.boolSetting("http.detailed_errors.enabled", true, Property.NodeScope); + public static final Setting SETTING_HTTP_MAX_CONTENT_LENGTH = + Setting.byteSizeSetting("http.max_content_length", new ByteSizeValue(100, ByteSizeUnit.MB), Property.NodeScope); + public static final Setting 
SETTING_HTTP_MAX_CHUNK_SIZE = + Setting.byteSizeSetting("http.max_chunk_size", new ByteSizeValue(8, ByteSizeUnit.KB), Property.NodeScope); + public static final Setting SETTING_HTTP_MAX_HEADER_SIZE = + Setting.byteSizeSetting("http.max_header_size", new ByteSizeValue(8, ByteSizeUnit.KB), Property.NodeScope); + public static final Setting SETTING_HTTP_MAX_INITIAL_LINE_LENGTH = + Setting.byteSizeSetting("http.max_initial_line_length", new ByteSizeValue(4, ByteSizeUnit.KB), Property.NodeScope); // don't reset cookies by default, since I don't think we really need to // note, parsing cookies was fixed in netty 3.5.1 regarding stack allocation, but still, currently, we don't need cookies - public static final Setting SETTING_HTTP_RESET_COOKIES = Setting.boolSetting("http.reset_cookies", false, false, Scope.CLUSTER); + public static final Setting SETTING_HTTP_RESET_COOKIES = + Setting.boolSetting("http.reset_cookies", false, Property.NodeScope); private HttpTransportSettings() { } diff --git a/core/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java b/core/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java index e64c6401f71..332380d9fb1 100644 --- a/core/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java +++ b/core/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.netty.OpenChannelsHandler; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.InetSocketTransportAddress; @@ -118,33 +119,32 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent SETTING_HTTP_NETTY_MAX_CUMULATION_BUFFER_CAPACITY = - Setting.byteSizeSetting("http.netty.max_cumulation_buffer_capacity", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER); + Setting.byteSizeSetting("http.netty.max_cumulation_buffer_capacity", new ByteSizeValue(-1), + Property.NodeScope); public static Setting SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = - Setting.intSetting("http.netty.max_composite_buffer_components", -1, false, Setting.Scope.CLUSTER); + Setting.intSetting("http.netty.max_composite_buffer_components", -1, Property.NodeScope); public static final Setting SETTING_HTTP_WORKER_COUNT = new Setting<>("http.netty.worker_count", (s) -> Integer.toString(EsExecutors.boundedNumberOfProcessors(s) * 2), - (s) -> Setting.parseInt(s, 1, "http.netty.worker_count"), false, Setting.Scope.CLUSTER); + (s) -> Setting.parseInt(s, 1, "http.netty.worker_count"), Property.NodeScope); - public static final Setting SETTING_HTTP_TCP_NO_DELAY = boolSetting("http.tcp_no_delay", NetworkService.TcpSettings - .TCP_NO_DELAY, false, - Setting.Scope.CLUSTER); - public static final Setting SETTING_HTTP_TCP_KEEP_ALIVE = boolSetting("http.tcp.keep_alive", NetworkService.TcpSettings - .TCP_KEEP_ALIVE, false, - Setting.Scope.CLUSTER); - public static final Setting SETTING_HTTP_TCP_BLOCKING_SERVER = boolSetting("http.tcp.blocking_server", NetworkService - .TcpSettings.TCP_BLOCKING_SERVER, - false, Setting.Scope.CLUSTER); - public static final Setting SETTING_HTTP_TCP_REUSE_ADDRESS = boolSetting("http.tcp.reuse_address", NetworkService - .TcpSettings.TCP_REUSE_ADDRESS, - false, 
Setting.Scope.CLUSTER); + public static final Setting SETTING_HTTP_TCP_NO_DELAY = + boolSetting("http.tcp_no_delay", NetworkService.TcpSettings.TCP_NO_DELAY, Property.NodeScope); + public static final Setting SETTING_HTTP_TCP_KEEP_ALIVE = + boolSetting("http.tcp.keep_alive", NetworkService.TcpSettings.TCP_KEEP_ALIVE, Property.NodeScope); + public static final Setting SETTING_HTTP_TCP_BLOCKING_SERVER = + boolSetting("http.tcp.blocking_server", NetworkService.TcpSettings.TCP_BLOCKING_SERVER, Property.NodeScope); + public static final Setting SETTING_HTTP_TCP_REUSE_ADDRESS = + boolSetting("http.tcp.reuse_address", NetworkService.TcpSettings.TCP_REUSE_ADDRESS, Property.NodeScope); - public static final Setting SETTING_HTTP_TCP_SEND_BUFFER_SIZE = Setting.byteSizeSetting("http.tcp.send_buffer_size", - NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE, false, Setting.Scope.CLUSTER); - public static final Setting SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE = Setting.byteSizeSetting("http.tcp" + - ".receive_buffer_size", NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE, false, Setting.Scope.CLUSTER); - public static final Setting SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE = Setting.byteSizeSetting( - "transport.netty.receive_predictor_size", + public static final Setting SETTING_HTTP_TCP_SEND_BUFFER_SIZE = + Setting.byteSizeSetting("http.tcp.send_buffer_size", NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE, + Property.NodeScope); + public static final Setting SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE = + Setting.byteSizeSetting("http.tcp.receive_buffer_size", NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE, + Property.NodeScope); + public static final Setting SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE = + Setting.byteSizeSetting("transport.netty.receive_predictor_size", settings -> { long defaultReceiverPredictor = 512 * 1024; if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes() > 0) { @@ -154,13 +154,11 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MIN = byteSizeSetting("http.netty" + - ".receive_predictor_min", - SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE, false, Setting.Scope.CLUSTER); - public static final Setting SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MAX = byteSizeSetting("http.netty" + - ".receive_predictor_max", - SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE, false, Setting.Scope.CLUSTER); + }, Property.NodeScope); + public static final Setting SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MIN = + byteSizeSetting("http.netty.receive_predictor_min", SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE, Property.NodeScope); + public static final Setting SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MAX = + byteSizeSetting("http.netty.receive_predictor_max", SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE, Property.NodeScope); protected final NetworkService networkService; @@ -262,7 +260,7 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent Integer.MAX_VALUE) { - logger.warn("maxContentLength[" + maxContentLength + "] set to high value, resetting it to [100mb]"); + logger.warn("maxContentLength[{}] set to high value, resetting it to [100mb]", maxContentLength); maxContentLength = new ByteSizeValue(100, ByteSizeUnit.MB); } this.maxContentLength = maxContentLength; diff --git a/core/src/main/java/org/elasticsearch/index/IndexModule.java b/core/src/main/java/org/elasticsearch/index/IndexModule.java index eabc0951e7f..b6120bd9d78 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexModule.java +++ 
b/core/src/main/java/org/elasticsearch/index/IndexModule.java @@ -22,6 +22,7 @@ package org.elasticsearch.index; import org.apache.lucene.util.SetOnce; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.analysis.AnalysisRegistry; @@ -65,13 +66,16 @@ import java.util.function.Function; */ public final class IndexModule { - public static final Setting INDEX_STORE_TYPE_SETTING = new Setting<>("index.store.type", "", Function.identity(), false, Setting.Scope.INDEX); + public static final Setting INDEX_STORE_TYPE_SETTING = + new Setting<>("index.store.type", "", Function.identity(), Property.IndexScope); public static final String SIMILARITY_SETTINGS_PREFIX = "index.similarity"; public static final String INDEX_QUERY_CACHE = "index"; public static final String NONE_QUERY_CACHE = "none"; - public static final Setting INDEX_QUERY_CACHE_TYPE_SETTING = new Setting<>("index.queries.cache.type", INDEX_QUERY_CACHE, Function.identity(), false, Setting.Scope.INDEX); + public static final Setting INDEX_QUERY_CACHE_TYPE_SETTING = + new Setting<>("index.queries.cache.type", INDEX_QUERY_CACHE, Function.identity(), Property.IndexScope); // for test purposes only - public static final Setting INDEX_QUERY_CACHE_EVERYTHING_SETTING = Setting.boolSetting("index.queries.cache.everything", false, false, Setting.Scope.INDEX); + public static final Setting INDEX_QUERY_CACHE_EVERYTHING_SETTING = + Setting.boolSetting("index.queries.cache.everything", false, Property.IndexScope); private final IndexSettings indexSettings; private final IndexStoreConfig indexStoreConfig; private final AnalysisRegistry analysisRegistry; diff --git a/core/src/main/java/org/elasticsearch/index/IndexSettings.java b/core/src/main/java/org/elasticsearch/index/IndexSettings.java index b17b8ab7edf..b996e70b1e5 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/core/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -36,7 +37,6 @@ import org.elasticsearch.index.translog.Translog; import java.util.Locale; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Predicate; @@ -50,15 +50,26 @@ import java.util.function.Predicate; */ public final class IndexSettings { - public static final Setting DEFAULT_FIELD_SETTING = new Setting<>("index.query.default_field", AllFieldMapper.NAME, Function.identity(), false, Setting.Scope.INDEX); - public static final Setting QUERY_STRING_LENIENT_SETTING = Setting.boolSetting("index.query_string.lenient", false, false, Setting.Scope.INDEX); - public static final Setting QUERY_STRING_ANALYZE_WILDCARD = Setting.boolSetting("indices.query.query_string.analyze_wildcard", false, false, Setting.Scope.CLUSTER); - public static final Setting QUERY_STRING_ALLOW_LEADING_WILDCARD = 
Setting.boolSetting("indices.query.query_string.allowLeadingWildcard", true, false, Setting.Scope.CLUSTER); - public static final Setting ALLOW_UNMAPPED = Setting.boolSetting("index.query.parse.allow_unmapped_fields", true, false, Setting.Scope.INDEX); - public static final Setting INDEX_TRANSLOG_SYNC_INTERVAL_SETTING = Setting.timeSetting("index.translog.sync_interval", TimeValue.timeValueSeconds(5), TimeValue.timeValueMillis(100), false, Setting.Scope.INDEX); - public static final Setting INDEX_TRANSLOG_DURABILITY_SETTING = new Setting<>("index.translog.durability", Translog.Durability.REQUEST.name(), (value) -> Translog.Durability.valueOf(value.toUpperCase(Locale.ROOT)), true, Setting.Scope.INDEX); - public static final Setting INDEX_WARMER_ENABLED_SETTING = Setting.boolSetting("index.warmer.enabled", true, true, Setting.Scope.INDEX); - public static final Setting INDEX_TTL_DISABLE_PURGE_SETTING = Setting.boolSetting("index.ttl.disable_purge", false, true, Setting.Scope.INDEX); + public static final Setting DEFAULT_FIELD_SETTING = + new Setting<>("index.query.default_field", AllFieldMapper.NAME, Function.identity(), Property.IndexScope); + public static final Setting QUERY_STRING_LENIENT_SETTING = + Setting.boolSetting("index.query_string.lenient", false, Property.IndexScope); + public static final Setting QUERY_STRING_ANALYZE_WILDCARD = + Setting.boolSetting("indices.query.query_string.analyze_wildcard", false, Property.NodeScope); + public static final Setting QUERY_STRING_ALLOW_LEADING_WILDCARD = + Setting.boolSetting("indices.query.query_string.allowLeadingWildcard", true, Property.NodeScope); + public static final Setting ALLOW_UNMAPPED = + Setting.boolSetting("index.query.parse.allow_unmapped_fields", true, Property.IndexScope); + public static final Setting INDEX_TRANSLOG_SYNC_INTERVAL_SETTING = + Setting.timeSetting("index.translog.sync_interval", TimeValue.timeValueSeconds(5), TimeValue.timeValueMillis(100), + Property.IndexScope); + public static final Setting INDEX_TRANSLOG_DURABILITY_SETTING = + new Setting<>("index.translog.durability", Translog.Durability.REQUEST.name(), + (value) -> Translog.Durability.valueOf(value.toUpperCase(Locale.ROOT)), Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_WARMER_ENABLED_SETTING = + Setting.boolSetting("index.warmer.enabled", true, Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_TTL_DISABLE_PURGE_SETTING = + Setting.boolSetting("index.ttl.disable_purge", false, Property.Dynamic, Property.IndexScope); public static final Setting INDEX_CHECK_ON_STARTUP = new Setting<>("index.shard.check_on_startup", "false", (s) -> { switch(s) { case "false": @@ -69,7 +80,7 @@ public final class IndexSettings { default: throw new IllegalArgumentException("unknown value for [index.shard.check_on_startup] must be one of [true, false, fix, checksum] but was: " + s); } - }, false, Setting.Scope.INDEX); + }, Property.IndexScope); /** * Index setting describing the maximum value of from + size on a query. @@ -79,10 +90,15 @@ public final class IndexSettings { * safely. 1,000,000 is probably way to high for any cluster to set * safely. 
*/ - public static final Setting MAX_RESULT_WINDOW_SETTING = Setting.intSetting("index.max_result_window", 10000, 1, true, Setting.Scope.INDEX); + public static final Setting MAX_RESULT_WINDOW_SETTING = + Setting.intSetting("index.max_result_window", 10000, 1, Property.Dynamic, Property.IndexScope); public static final TimeValue DEFAULT_REFRESH_INTERVAL = new TimeValue(1, TimeUnit.SECONDS); - public static final Setting INDEX_REFRESH_INTERVAL_SETTING = Setting.timeSetting("index.refresh_interval", DEFAULT_REFRESH_INTERVAL, new TimeValue(-1, TimeUnit.MILLISECONDS), true, Setting.Scope.INDEX); - public static final Setting INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING = Setting.byteSizeSetting("index.translog.flush_threshold_size", new ByteSizeValue(512, ByteSizeUnit.MB), true, Setting.Scope.INDEX); + public static final Setting INDEX_REFRESH_INTERVAL_SETTING = + Setting.timeSetting("index.refresh_interval", DEFAULT_REFRESH_INTERVAL, new TimeValue(-1, TimeUnit.MILLISECONDS), + Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING = + Setting.byteSizeSetting("index.translog.flush_threshold_size", new ByteSizeValue(512, ByteSizeUnit.MB), Property.Dynamic, + Property.IndexScope); /** @@ -90,7 +106,9 @@ public final class IndexSettings { * This setting is realtime updateable */ public static final TimeValue DEFAULT_GC_DELETES = TimeValue.timeValueSeconds(60); - public static final Setting INDEX_GC_DELETES_SETTING = Setting.timeSetting("index.gc_deletes", DEFAULT_GC_DELETES, new TimeValue(-1, TimeUnit.MILLISECONDS), true, Setting.Scope.INDEX); + public static final Setting INDEX_GC_DELETES_SETTING = + Setting.timeSetting("index.gc_deletes", DEFAULT_GC_DELETES, new TimeValue(-1, TimeUnit.MILLISECONDS), Property.Dynamic, + Property.IndexScope); private final Index index; private final Version version; diff --git a/core/src/main/java/org/elasticsearch/index/IndexWarmer.java b/core/src/main/java/org/elasticsearch/index/IndexWarmer.java index 9fabc8efc40..332fcdd380e 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexWarmer.java +++ b/core/src/main/java/org/elasticsearch/index/IndexWarmer.java @@ -19,14 +19,10 @@ package org.elasticsearch.index; -import com.carrotsearch.hppc.ObjectHashSet; -import com.carrotsearch.hppc.ObjectSet; -import com.carrotsearch.hppc.cursors.ObjectCursor; -import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.NumericDocValues; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.engine.Engine; @@ -56,14 +52,13 @@ public final class IndexWarmer extends AbstractComponent { public static final Setting INDEX_NORMS_LOADING_SETTING = new Setting<>("index.norms.loading", MappedFieldType.Loading.LAZY.toString(), (s) -> MappedFieldType.Loading.parse(s, MappedFieldType.Loading.LAZY), - false, Setting.Scope.INDEX); + Property.IndexScope); private final List listeners; IndexWarmer(Settings settings, ThreadPool threadPool, Listener... 
listeners) { super(settings); ArrayList list = new ArrayList<>(); final Executor executor = threadPool.executor(ThreadPool.Names.WARMER); - list.add(new NormsWarmer(executor)); list.add(new FieldDataWarmer(executor)); for (Listener listener : listeners) { list.add(listener); @@ -137,64 +132,6 @@ public final class IndexWarmer extends AbstractComponent { TerminationHandle warmTopReader(IndexShard indexShard, Engine.Searcher searcher); } - private static class NormsWarmer implements IndexWarmer.Listener { - private final Executor executor; - public NormsWarmer(Executor executor) { - this.executor = executor; - } - @Override - public TerminationHandle warmNewReaders(final IndexShard indexShard, final Engine.Searcher searcher) { - final MappedFieldType.Loading defaultLoading = indexShard.indexSettings().getValue(INDEX_NORMS_LOADING_SETTING); - final MapperService mapperService = indexShard.mapperService(); - final ObjectSet warmUp = new ObjectHashSet<>(); - for (DocumentMapper docMapper : mapperService.docMappers(false)) { - for (FieldMapper fieldMapper : docMapper.mappers()) { - final String indexName = fieldMapper.fieldType().name(); - MappedFieldType.Loading normsLoading = fieldMapper.fieldType().normsLoading(); - if (normsLoading == null) { - normsLoading = defaultLoading; - } - if (fieldMapper.fieldType().indexOptions() != IndexOptions.NONE && !fieldMapper.fieldType().omitNorms() - && normsLoading == MappedFieldType.Loading.EAGER) { - warmUp.add(indexName); - } - } - } - - final CountDownLatch latch = new CountDownLatch(1); - // Norms loading may be I/O intensive but is not CPU intensive, so we execute it in a single task - executor.execute(() -> { - try { - for (ObjectCursor stringObjectCursor : warmUp) { - final String indexName = stringObjectCursor.value; - final long start = System.nanoTime(); - for (final LeafReaderContext ctx : searcher.reader().leaves()) { - final NumericDocValues values = ctx.reader().getNormValues(indexName); - if (values != null) { - values.get(0); - } - } - if (indexShard.warmerService().logger().isTraceEnabled()) { - indexShard.warmerService().logger().trace("warmed norms for [{}], took [{}]", indexName, - TimeValue.timeValueNanos(System.nanoTime() - start)); - } - } - } catch (Throwable t) { - indexShard.warmerService().logger().warn("failed to warm-up norms", t); - } finally { - latch.countDown(); - } - }); - - return () -> latch.await(); - } - - @Override - public TerminationHandle warmTopReader(IndexShard indexShard, final Engine.Searcher searcher) { - return TerminationHandle.NO_WAIT; - } - } - private static class FieldDataWarmer implements IndexWarmer.Listener { private final Executor executor; diff --git a/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java b/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java index d6fa552b203..ff10179f026 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java +++ b/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.engine.Engine; @@ -54,12 +55,23 @@ public final class IndexingSlowLog implements IndexingOperationListener { private final ESLogger indexLogger; 
private static final String INDEX_INDEXING_SLOWLOG_PREFIX = "index.indexing.slowlog"; - public static final Setting<TimeValue> INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING = Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.warn", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); - public static final Setting<TimeValue> INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING = Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.info", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); - public static final Setting<TimeValue> INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING = Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.debug", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); - public static final Setting<TimeValue> INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING = Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.trace", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); - public static final Setting<Boolean> INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING = Setting.boolSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".reformat", true, true, Setting.Scope.INDEX); - public static final Setting<SlowLogLevel> INDEX_INDEXING_SLOWLOG_LEVEL_SETTING = new Setting<>(INDEX_INDEXING_SLOWLOG_PREFIX +".level", SlowLogLevel.TRACE.name(), SlowLogLevel::parse, true, Setting.Scope.INDEX); + public static final Setting<TimeValue> INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING = + Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.warn", TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope); + public static final Setting<TimeValue> INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING = + Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.info", TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope); + public static final Setting<TimeValue> INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING = + Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.debug", TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope); + public static final Setting<TimeValue> INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING = + Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.trace", TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope); + public static final Setting<Boolean> INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING = + Setting.boolSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".reformat", true, Property.Dynamic, Property.IndexScope); + public static final Setting<SlowLogLevel> INDEX_INDEXING_SLOWLOG_LEVEL_SETTING = + new Setting<>(INDEX_INDEXING_SLOWLOG_PREFIX +".level", SlowLogLevel.TRACE.name(), SlowLogLevel::parse, Property.Dynamic, + Property.IndexScope); /** * Reads how much of the source to log. The user can specify any value they * like and numbers are interpreted as the maximum number of characters to log @@ -72,7 +84,7 @@ public final class IndexingSlowLog implements IndexingOperationListener { } catch (NumberFormatException e) { return Booleans.parseBoolean(value, true) ? 
Integer.MAX_VALUE : 0; } - }, true, Setting.Scope.INDEX); + }, Property.Dynamic, Property.IndexScope); IndexingSlowLog(IndexSettings indexSettings) { this.indexLogger = Loggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".index", indexSettings.getSettings()); diff --git a/core/src/main/java/org/elasticsearch/index/MergePolicyConfig.java b/core/src/main/java/org/elasticsearch/index/MergePolicyConfig.java index fc9f30cf3fd..c8d82eae888 100644 --- a/core/src/main/java/org/elasticsearch/index/MergePolicyConfig.java +++ b/core/src/main/java/org/elasticsearch/index/MergePolicyConfig.java @@ -25,6 +25,7 @@ import org.apache.lucene.index.TieredMergePolicy; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -126,15 +127,31 @@ public final class MergePolicyConfig { public static final ByteSizeValue DEFAULT_MAX_MERGED_SEGMENT = new ByteSizeValue(5, ByteSizeUnit.GB); public static final double DEFAULT_SEGMENTS_PER_TIER = 10.0d; public static final double DEFAULT_RECLAIM_DELETES_WEIGHT = 2.0d; - public static final Setting INDEX_COMPOUND_FORMAT_SETTING = new Setting<>("index.compound_format", Double.toString(TieredMergePolicy.DEFAULT_NO_CFS_RATIO), MergePolicyConfig::parseNoCFSRatio, true, Setting.Scope.INDEX); + public static final Setting INDEX_COMPOUND_FORMAT_SETTING = + new Setting<>("index.compound_format", Double.toString(TieredMergePolicy.DEFAULT_NO_CFS_RATIO), MergePolicyConfig::parseNoCFSRatio, + Property.Dynamic, Property.IndexScope); - public static final Setting INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING = Setting.doubleSetting("index.merge.policy.expunge_deletes_allowed", DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d, true, Setting.Scope.INDEX); - public static final Setting INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING = Setting.byteSizeSetting("index.merge.policy.floor_segment", DEFAULT_FLOOR_SEGMENT, true, Setting.Scope.INDEX); - public static final Setting INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING = Setting.intSetting("index.merge.policy.max_merge_at_once", DEFAULT_MAX_MERGE_AT_ONCE, 2, true, Setting.Scope.INDEX); - public static final Setting INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING = Setting.intSetting("index.merge.policy.max_merge_at_once_explicit", DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT, 2, true, Setting.Scope.INDEX); - public static final Setting INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING = Setting.byteSizeSetting("index.merge.policy.max_merged_segment", DEFAULT_MAX_MERGED_SEGMENT, true, Setting.Scope.INDEX); - public static final Setting INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING = Setting.doubleSetting("index.merge.policy.segments_per_tier", DEFAULT_SEGMENTS_PER_TIER, 2.0d, true, Setting.Scope.INDEX); - public static final Setting INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING = Setting.doubleSetting("index.merge.policy.reclaim_deletes_weight", DEFAULT_RECLAIM_DELETES_WEIGHT, 0.0d, true, Setting.Scope.INDEX); + public static final Setting INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING = + Setting.doubleSetting("index.merge.policy.expunge_deletes_allowed", DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d, + Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING = + Setting.byteSizeSetting("index.merge.policy.floor_segment", DEFAULT_FLOOR_SEGMENT, 
+ Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING = + Setting.intSetting("index.merge.policy.max_merge_at_once", DEFAULT_MAX_MERGE_AT_ONCE, 2, + Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING = + Setting.intSetting("index.merge.policy.max_merge_at_once_explicit", DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT, 2, + Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING = + Setting.byteSizeSetting("index.merge.policy.max_merged_segment", DEFAULT_MAX_MERGED_SEGMENT, + Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING = + Setting.doubleSetting("index.merge.policy.segments_per_tier", DEFAULT_SEGMENTS_PER_TIER, 2.0d, + Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING = + Setting.doubleSetting("index.merge.policy.reclaim_deletes_weight", DEFAULT_RECLAIM_DELETES_WEIGHT, 0.0d, + Property.Dynamic, Property.IndexScope); public static final String INDEX_MERGE_ENABLED = "index.merge.enabled"; // don't convert to Setting<> and register... we only set this in tests and register via a plugin diff --git a/core/src/main/java/org/elasticsearch/index/MergeSchedulerConfig.java b/core/src/main/java/org/elasticsearch/index/MergeSchedulerConfig.java index 0d212a4eb30..2eb43a50ee4 100644 --- a/core/src/main/java/org/elasticsearch/index/MergeSchedulerConfig.java +++ b/core/src/main/java/org/elasticsearch/index/MergeSchedulerConfig.java @@ -21,6 +21,7 @@ package org.elasticsearch.index; import org.apache.lucene.index.ConcurrentMergeScheduler; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.util.concurrent.EsExecutors; /** @@ -51,9 +52,17 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; */ public final class MergeSchedulerConfig { - public static final Setting MAX_THREAD_COUNT_SETTING = new Setting<>("index.merge.scheduler.max_thread_count", (s) -> Integer.toString(Math.max(1, Math.min(4, EsExecutors.boundedNumberOfProcessors(s) / 2))), (s) -> Setting.parseInt(s, 1, "index.merge.scheduler.max_thread_count"), true, Setting.Scope.INDEX); - public static final Setting MAX_MERGE_COUNT_SETTING = new Setting<>("index.merge.scheduler.max_merge_count", (s) -> Integer.toString(MAX_THREAD_COUNT_SETTING.get(s) + 5), (s) -> Setting.parseInt(s, 1, "index.merge.scheduler.max_merge_count"), true, Setting.Scope.INDEX); - public static final Setting AUTO_THROTTLE_SETTING = Setting.boolSetting("index.merge.scheduler.auto_throttle", true, true, Setting.Scope.INDEX); + public static final Setting MAX_THREAD_COUNT_SETTING = + new Setting<>("index.merge.scheduler.max_thread_count", + (s) -> Integer.toString(Math.max(1, Math.min(4, EsExecutors.boundedNumberOfProcessors(s) / 2))), + (s) -> Setting.parseInt(s, 1, "index.merge.scheduler.max_thread_count"), Property.Dynamic, + Property.IndexScope); + public static final Setting MAX_MERGE_COUNT_SETTING = + new Setting<>("index.merge.scheduler.max_merge_count", + (s) -> Integer.toString(MAX_THREAD_COUNT_SETTING.get(s) + 5), + (s) -> Setting.parseInt(s, 1, "index.merge.scheduler.max_merge_count"), Property.Dynamic, Property.IndexScope); + public static final Setting AUTO_THROTTLE_SETTING = + Setting.boolSetting("index.merge.scheduler.auto_throttle", true, 
Property.Dynamic, Property.IndexScope); private volatile boolean autoThrottle; private volatile int maxThreadCount; diff --git a/core/src/main/java/org/elasticsearch/index/SearchSlowLog.java b/core/src/main/java/org/elasticsearch/index/SearchSlowLog.java index df3139fe57c..cfa779d64aa 100644 --- a/core/src/main/java/org/elasticsearch/index/SearchSlowLog.java +++ b/core/src/main/java/org/elasticsearch/index/SearchSlowLog.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.search.internal.SearchContext; @@ -50,16 +51,35 @@ public final class SearchSlowLog { private final ESLogger fetchLogger; private static final String INDEX_SEARCH_SLOWLOG_PREFIX = "index.search.slowlog"; - public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.warn", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); - public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.info", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); - public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.debug", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); - public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.trace", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); - public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.warn", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); - public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.info", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); - public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.debug", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); - public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.trace", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); - public static final Setting INDEX_SEARCH_SLOWLOG_REFORMAT = Setting.boolSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".reformat", true, true, Setting.Scope.INDEX); - public static final Setting INDEX_SEARCH_SLOWLOG_LEVEL = new Setting<>(INDEX_SEARCH_SLOWLOG_PREFIX + ".level", SlowLogLevel.TRACE.name(), SlowLogLevel::parse, true, Setting.Scope.INDEX); + public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING = + Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.warn", TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), Property.Dynamic, 
Property.IndexScope); + public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING = + Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.info", TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING = + Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.debug", TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING = + Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.trace", TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING = + Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.warn", TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING = + Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.info", TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING = + Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.debug", TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING = + Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.trace", TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_SEARCH_SLOWLOG_REFORMAT = + Setting.boolSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".reformat", true, Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_SEARCH_SLOWLOG_LEVEL = + new Setting<>(INDEX_SEARCH_SLOWLOG_PREFIX + ".level", SlowLogLevel.TRACE.name(), SlowLogLevel::parse, Property.Dynamic, + Property.IndexScope); public SearchSlowLog(IndexSettings indexSettings) { diff --git a/core/src/main/java/org/elasticsearch/index/analysis/AnalysisService.java b/core/src/main/java/org/elasticsearch/index/analysis/AnalysisService.java index 09e96f3743b..453552b9dd1 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/AnalysisService.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/AnalysisService.java @@ -155,7 +155,7 @@ public class AnalysisService extends AbstractIndexComponent implements Closeable // because analyzers are aliased, they might be closed several times // an NPE is thrown in this case, so ignore.... 
} catch (Exception e) { - logger.debug("failed to close analyzer " + analyzer); + logger.debug("failed to close analyzer {}", analyzer); } } } diff --git a/core/src/main/java/org/elasticsearch/index/analysis/NamedAnalyzer.java b/core/src/main/java/org/elasticsearch/index/analysis/NamedAnalyzer.java index 25ff8f96834..1dd562c4bb1 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/NamedAnalyzer.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/NamedAnalyzer.java @@ -93,7 +93,7 @@ public class NamedAnalyzer extends DelegatingAnalyzerWrapper { public String toString() { return "analyzer name[" + name + "], analyzer [" + analyzer + "]"; } - + /** It is an error if this is ever used, it means we screwed up! */ static final ReuseStrategy ERROR_STRATEGY = new Analyzer.ReuseStrategy() { @Override diff --git a/core/src/main/java/org/elasticsearch/index/analysis/NumericDoubleAnalyzer.java b/core/src/main/java/org/elasticsearch/index/analysis/NumericDoubleAnalyzer.java index e90409421d2..77716e7a43d 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/NumericDoubleAnalyzer.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/NumericDoubleAnalyzer.java @@ -56,4 +56,4 @@ public class NumericDoubleAnalyzer extends NumericAnalyzer<NumericDoubleTokenizer> { protected NumericDoubleTokenizer createNumericTokenizer(char[] buffer) throws IOException { return new NumericDoubleTokenizer(precisionStep, buffer); } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/index/analysis/NumericLongAnalyzer.java b/core/src/main/java/org/elasticsearch/index/analysis/NumericLongAnalyzer.java index ab112396392..9b865920341 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/NumericLongAnalyzer.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/NumericLongAnalyzer.java @@ -56,4 +56,4 @@ public class NumericLongAnalyzer extends NumericAnalyzer<NumericLongTokenizer> { protected NumericLongTokenizer createNumericTokenizer(char[] buffer) throws IOException { return new NumericLongTokenizer(precisionStep, buffer); } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java b/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java index f7802330ab7..19ec3c8402e 100644 --- a/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java +++ b/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java @@ -38,6 +38,7 @@ import org.elasticsearch.common.cache.RemovalListener; import org.elasticsearch.common.cache.RemovalNotification; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.IndexSettings; @@ -70,7 +71,8 @@ import java.util.concurrent.Executor; */ public final class BitsetFilterCache extends AbstractIndexComponent implements LeafReader.CoreClosedListener, RemovalListener<Object, Cache<Query, BitsetFilterCache.Value>>, Closeable { - public static final Setting<Boolean> INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING = Setting.boolSetting("index.load_fixed_bitset_filters_eagerly", true, false, Setting.Scope.INDEX); + public static final Setting<Boolean> INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING = + Setting.boolSetting("index.load_fixed_bitset_filters_eagerly", true, Property.IndexScope); private final boolean loadRandomAccessFiltersEagerly; private final Cache<Object, Cache<Query, Value>> 
loadedFilters; diff --git a/core/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java b/core/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java index 6dd710e4e89..965a2e58f9c 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java +++ b/core/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java @@ -129,9 +129,9 @@ class ElasticsearchConcurrentMergeScheduler extends ConcurrentMergeScheduler { merge.rateLimiter.getMBPerSec()); if (tookMS > 20000) { // if more than 20 seconds, DEBUG log it - logger.debug(message); + logger.debug("{}", message); } else if (logger.isTraceEnabled()) { - logger.trace(message); + logger.trace("{}", message); } } } diff --git a/core/src/main/java/org/elasticsearch/index/engine/Engine.java b/core/src/main/java/org/elasticsearch/index/engine/Engine.java index 3c5583440e0..bb7aa0ea71a 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -671,7 +671,7 @@ public abstract class Engine implements Closeable { closeNoLock("engine failed on: [" + reason + "]"); } finally { if (failedEngine != null) { - logger.debug("tried to fail engine but engine is already failed. ignoring. [{}]", reason, failure); + logger.debug("tried to fail engine but engine is already failed. ignoring. [{}]", failure, reason); return; } logger.warn("failed engine [{}]", failure, reason); @@ -697,7 +697,7 @@ public abstract class Engine implements Closeable { store.decRef(); } } else { - logger.debug("tried to fail engine but could not acquire lock - engine should be failed by now [{}]", reason, failure); + logger.debug("tried to fail engine but could not acquire lock - engine should be failed by now [{}]", failure, reason); } } diff --git a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java index 9740ccd0358..14a8f043234 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java +++ b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java @@ -26,6 +26,7 @@ import org.apache.lucene.search.QueryCache; import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.similarities.Similarity; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -39,8 +40,6 @@ import org.elasticsearch.index.translog.TranslogConfig; import org.elasticsearch.indices.IndexingMemoryController; import org.elasticsearch.threadpool.ThreadPool; -import java.util.Set; - /* * Holds all the configuration that is used to create an {@link Engine}. 
* Once {@link Engine} has been created with this object, changes to this @@ -83,7 +82,7 @@ public final class EngineConfig { } return s; } - }, false, Setting.Scope.INDEX); + }, Property.IndexScope); /** if set to true the engine will start even if the translog id in the commit point cannot be found */ public static final String INDEX_FORCE_NEW_TRANSLOG = "index.engine.force_new_translog"; diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java b/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java index 94e9edc5b94..24e49ec63e6 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java @@ -24,6 +24,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.fielddata.plain.AbstractGeoPointDVIndexFieldData; @@ -67,7 +68,7 @@ public class IndexFieldDataService extends AbstractIndexComponent implements Clo default: throw new IllegalArgumentException("failed to parse [" + s + "] must be one of [node,none]"); } - }, false, Setting.Scope.INDEX); + }, Property.IndexScope); private static final IndexFieldData.Builder MISSING_DOC_VALUES_BUILDER = (indexProperties, fieldType, cache, breakerService, mapperService1) -> { throw new IllegalStateException("Can't load fielddata on [" + fieldType.name() @@ -230,13 +231,13 @@ public class IndexFieldDataService extends AbstractIndexComponent implements Clo IndexFieldData.Builder builder = null; String format = type.getFormat(indexSettings.getSettings()); if (format != null && FieldDataType.DOC_VALUES_FORMAT_VALUE.equals(format) && !docValues) { - logger.warn("field [" + fieldName + "] has no doc values, will use default field data format"); + logger.warn("field [{}] has no doc values, will use default field data format", fieldName); format = null; } if (format != null) { builder = buildersByTypeAndFormat.get(Tuple.tuple(type.getType(), format)); if (builder == null) { - logger.warn("failed to find format [" + format + "] for field [" + fieldName + "], will use default"); + logger.warn("failed to find format [{}] for field [{}], will use default", format, fieldName); } } if (builder == null && docValues) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java index 28ebb254661..20522abfbac 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.analysis.NamedAnalyzer; @@ -49,8 +50,10 @@ import java.util.Map; import java.util.stream.StreamSupport; public abstract class FieldMapper extends Mapper implements Cloneable { - public static final Setting<Boolean> 
IGNORE_MALFORMED_SETTING = Setting.boolSetting("index.mapping.ignore_malformed", false, false, Setting.Scope.INDEX); - public static final Setting COERCE_SETTING = Setting.boolSetting("index.mapping.coerce", false, false, Setting.Scope.INDEX); + public static final Setting IGNORE_MALFORMED_SETTING = + Setting.boolSetting("index.mapping.ignore_malformed", false, Property.IndexScope); + public static final Setting COERCE_SETTING = + Setting.boolSetting("index.mapping.coerce", false, Property.IndexScope); public abstract static class Builder extends Mapper.Builder { protected final MappedFieldType fieldType; @@ -200,11 +203,6 @@ public abstract class FieldMapper extends Mapper implements Cloneable { return builder; } - public T normsLoading(MappedFieldType.Loading normsLoading) { - this.fieldType.setNormsLoading(normsLoading); - return builder; - } - public T fieldDataSettings(Settings settings) { this.fieldDataSettings = settings; return builder; @@ -240,6 +238,9 @@ public abstract class FieldMapper extends Mapper implements Cloneable { protected void setupFieldType(BuilderContext context) { fieldType.setName(buildFullName(context)); + if (context.indexCreatedVersion().before(Version.V_5_0_0)) { + fieldType.setOmitNorms(fieldType.omitNorms() && fieldType.boost() == 1.0f); + } if (fieldType.indexAnalyzer() == null && fieldType.tokenized() == false && fieldType.indexOptions() != IndexOptions.NONE) { fieldType.setIndexAnalyzer(Lucene.KEYWORD_ANALYZER); fieldType.setSearchAnalyzer(Lucene.KEYWORD_ANALYZER); @@ -416,15 +417,8 @@ public abstract class FieldMapper extends Mapper implements Cloneable { if (includeDefaults || fieldType().storeTermVectors() != defaultFieldType.storeTermVectors()) { builder.field("term_vector", termVectorOptionsToString(fieldType())); } - if (includeDefaults || fieldType().omitNorms() != defaultFieldType.omitNorms() || fieldType().normsLoading() != null) { - builder.startObject("norms"); - if (includeDefaults || fieldType().omitNorms() != defaultFieldType.omitNorms()) { - builder.field("enabled", !fieldType().omitNorms()); - } - if (fieldType().normsLoading() != null) { - builder.field(MappedFieldType.Loading.KEY, fieldType().normsLoading()); - } - builder.endObject(); + if (includeDefaults || fieldType().omitNorms() != defaultFieldType.omitNorms()) { + builder.field("norms", fieldType().omitNorms() == false); } if (indexed && (includeDefaults || fieldType().indexOptions() != defaultFieldType.indexOptions())) { builder.field("index_options", indexOptionToString(fieldType().indexOptions())); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java b/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java index 10b165ff4c5..98ad76f7fe1 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java @@ -103,7 +103,6 @@ public abstract class MappedFieldType extends FieldType { private NamedAnalyzer searchAnalyzer; private NamedAnalyzer searchQuoteAnalyzer; private SimilarityProvider similarity; - private Loading normsLoading; private FieldDataType fieldDataType; private Object nullValue; private String nullValueAsString; // for sending null value to _all field @@ -117,7 +116,6 @@ public abstract class MappedFieldType extends FieldType { this.searchAnalyzer = ref.searchAnalyzer(); this.searchQuoteAnalyzer = ref.searchQuoteAnalyzer(); this.similarity = ref.similarity(); - this.normsLoading = ref.normsLoading(); this.fieldDataType = 
ref.fieldDataType(); this.nullValue = ref.nullValue(); this.nullValueAsString = ref.nullValueAsString(); @@ -158,7 +156,6 @@ public abstract class MappedFieldType extends FieldType { Objects.equals(indexAnalyzer, fieldType.indexAnalyzer) && Objects.equals(searchAnalyzer, fieldType.searchAnalyzer) && Objects.equals(searchQuoteAnalyzer(), fieldType.searchQuoteAnalyzer()) && - Objects.equals(normsLoading, fieldType.normsLoading) && Objects.equals(fieldDataType, fieldType.fieldDataType) && Objects.equals(nullValue, fieldType.nullValue) && Objects.equals(nullValueAsString, fieldType.nullValueAsString); @@ -167,7 +164,7 @@ @Override public int hashCode() { return Objects.hash(super.hashCode(), name, boost, docValues, indexAnalyzer, searchAnalyzer, searchQuoteAnalyzer, - similarity == null ? null : similarity.name(), normsLoading, fieldDataType, nullValue, nullValueAsString); + similarity == null ? null : similarity.name(), fieldDataType, nullValue, nullValueAsString); } // norelease: we need to override freeze() and add safety checks that all settings are actually set @@ -205,7 +202,7 @@ public abstract class MappedFieldType extends FieldType { conflicts.add("mapper [" + name() + "] has different [doc_values] values"); } if (omitNorms() && !other.omitNorms()) { - conflicts.add("mapper [" + name() + "] has different [omit_norms] values, cannot change from disable to enabled"); + conflicts.add("mapper [" + name() + "] has different [norms] values, cannot change from disabled to enabled"); } if (storeTermVectors() != other.storeTermVectors()) { conflicts.add("mapper [" + name() + "] has different [store_term_vector] values"); @@ -242,9 +239,6 @@ if (boost() != other.boost()) { conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [boost] across all types."); } - if (normsLoading() != other.normsLoading()) { - conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [norms.loading] across all types."); - } if (Objects.equals(searchAnalyzer(), other.searchAnalyzer()) == false) { conflicts.add("mapper [" + name() + "] is used by multiple types. 
Set update_all_types to true to update [search_analyzer] across all types."); } @@ -304,15 +298,6 @@ public abstract class MappedFieldType extends FieldType { this.docValues = hasDocValues; } - public Loading normsLoading() { - return normsLoading; - } - - public void setNormsLoading(Loading normsLoading) { - checkIfFrozen(); - this.normsLoading = normsLoading; - } - public NamedAnalyzer indexAnalyzer() { return indexAnalyzer; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java index b25f5f6a02d..414ea0f7e9c 100755 --- a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -27,6 +27,7 @@ import org.elasticsearch.ElasticsearchGenerationException; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.AnalysisService; @@ -81,9 +82,11 @@ public class MapperService extends AbstractIndexComponent implements Closeable { } public static final String DEFAULT_MAPPING = "_default_"; - public static final Setting INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING = Setting.longSetting("index.mapping.nested_fields.limit", 50L, 0, true, Setting.Scope.INDEX); + public static final Setting INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING = + Setting.longSetting("index.mapping.nested_fields.limit", 50L, 0, Property.Dynamic, Property.IndexScope); public static final boolean INDEX_MAPPER_DYNAMIC_DEFAULT = true; - public static final Setting INDEX_MAPPER_DYNAMIC_SETTING = Setting.boolSetting("index.mapper.dynamic", INDEX_MAPPER_DYNAMIC_DEFAULT, false, Setting.Scope.INDEX); + public static final Setting INDEX_MAPPER_DYNAMIC_SETTING = + Setting.boolSetting("index.mapper.dynamic", INDEX_MAPPER_DYNAMIC_DEFAULT, Property.IndexScope); private static ObjectHashSet META_FIELDS = ObjectHashSet.from( "_uid", "_id", "_type", "_all", "_parent", "_routing", "_index", "_size", "_timestamp", "_ttl" diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/KeywordFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/KeywordFieldMapper.java index 3f01493590c..744882e1ccd 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/KeywordFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/KeywordFieldMapper.java @@ -92,14 +92,6 @@ public final class KeywordFieldMapper extends FieldMapper implements AllFieldMap return super.indexOptions(indexOptions); } - @Override - protected void setupFieldType(BuilderContext context) { - if (!omitNormsSet && fieldType.boost() != 1.0f) { - fieldType.setOmitNorms(false); - } - super.setupFieldType(context); - } - @Override public KeywordFieldMapper build(BuilderContext context) { setupFieldType(context); @@ -128,6 +120,9 @@ public final class KeywordFieldMapper extends FieldMapper implements AllFieldMap } else if (propName.equals("ignore_above")) { builder.ignoreAbove(XContentMapValues.nodeIntegerValue(propNode, -1)); iterator.remove(); + } else if (propName.equals("norms")) { + builder.omitNorms(XContentMapValues.nodeBooleanValue(propNode) == false); + iterator.remove(); } else if (parseMultiField(builder, name, parserContext, propName, propNode)) { 
iterator.remove(); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java index 7c2a38eaee7..4b4c0882508 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java @@ -31,8 +31,10 @@ import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.IndexableFieldType; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.Version; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -52,7 +54,9 @@ import java.util.List; * */ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldMapper.IncludeInAll { - private static final Setting COERCE_SETTING = Setting.boolSetting("index.mapping.coerce", true, false, Setting.Scope.INDEX); // this is private since it has a different default + // this is private since it has a different default + private static final Setting COERCE_SETTING = + Setting.boolSetting("index.mapping.coerce", true, Property.IndexScope); public static class Defaults { @@ -113,7 +117,6 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM protected void setupFieldType(BuilderContext context) { super.setupFieldType(context); - fieldType.setOmitNorms(fieldType.omitNorms() && fieldType.boost() == 1.0f); int precisionStep = fieldType.numericPrecisionStep(); if (precisionStep <= 0 || precisionStep >= maxPrecisionStep()) { fieldType.setNumericPrecisionStep(Integer.MAX_VALUE); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java index 656d6effcfa..4301a2252d8 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java @@ -157,13 +157,30 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc fieldName); final Object index = node.remove("index"); final boolean keyword = index != null && "analyzed".equals(index) == false; - // upgrade the index setting - node.put("index", "no".equals(index) == false); + { + // upgrade the index setting + node.put("index", "no".equals(index) == false); + } + { + // upgrade norms settings + Object norms = node.remove("norms"); + if (norms instanceof Map) { + norms = ((Map) norms).get("enabled"); + } + if (norms != null) { + node.put("norms", TypeParsers.nodeBooleanValue("norms", norms, parserContext)); + } + Object omitNorms = node.remove("omit_norms"); + if (omitNorms != null) { + node.put("norms", TypeParsers.nodeBooleanValue("omit_norms", omitNorms, parserContext) == false); + } + } if (keyword) { return new KeywordFieldMapper.TypeParser().parse(fieldName, node, parserContext); } else { return new TextFieldMapper.TypeParser().parse(fieldName, node, parserContext); } + } throw new IllegalArgumentException("The [string] type is removed in 5.0. 
You should now use either a [text] " + "or [keyword] field instead for field [" + fieldName + "]"); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java b/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java index c42de2f611f..c6b91292ace 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java @@ -71,7 +71,7 @@ public class TypeParsers { private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(TypeParsers.class)); private static final Set<String> BOOLEAN_STRINGS = new HashSet<>(Arrays.asList("true", "false")); - private static boolean nodeBooleanValue(String name, Object node, Mapper.TypeParser.ParserContext parserContext) { + public static boolean nodeBooleanValue(String name, Object node, Mapper.TypeParser.ParserContext parserContext) { // Hook onto ParseFieldMatcher so that parsing becomes strict when setting index.query.parse.strict if (parserContext.parseFieldMatcher().isStrict()) { return XContentMapValues.nodeBooleanValue(node); @@ -99,9 +99,6 @@ } else if (propName.equals("coerce")) { builder.coerce(nodeBooleanValue("coerce", propNode, parserContext)); iterator.remove(); - } else if (propName.equals("omit_norms")) { - builder.omitNorms(nodeBooleanValue("omit_norms", propNode, parserContext)); - iterator.remove(); } else if (propName.equals("similarity")) { SimilarityProvider similarityProvider = resolveSimilarity(parserContext, name, propNode.toString()); builder.similarity(similarityProvider); @@ -187,6 +184,37 @@ } } + public static boolean parseNorms(FieldMapper.Builder builder, String propName, Object propNode, Mapper.TypeParser.ParserContext parserContext) { + if (propName.equals("norms")) { + if (propNode instanceof Map) { + final Map<String, Object> properties = nodeMapValue(propNode, "norms"); + for (Iterator<Entry<String, Object>> propsIterator = properties.entrySet().iterator(); propsIterator.hasNext();) { + Entry<String, Object> entry2 = propsIterator.next(); + final String propName2 = Strings.toUnderscoreCase(entry2.getKey()); + final Object propNode2 = entry2.getValue(); + if (propName2.equals("enabled")) { + builder.omitNorms(!lenientNodeBooleanValue(propNode2)); + propsIterator.remove(); + } else if (propName2.equals(Loading.KEY)) { + // ignore for bw compat + propsIterator.remove(); + } + } + DocumentMapperParser.checkNoRemainingFields(propName, properties, parserContext.indexVersionCreated()); + DEPRECATION_LOGGER.deprecated("The [norms{enabled:true/false}] way of specifying norms is deprecated, please use [norms:true/false] instead"); + } else { + builder.omitNorms(nodeBooleanValue("norms", propNode, parserContext) == false); + } + return true; + } else if (propName.equals("omit_norms")) { + builder.omitNorms(nodeBooleanValue("omit_norms", propNode, parserContext)); + DEPRECATION_LOGGER.deprecated("[omit_norms] is deprecated, please use [norms] instead with the opposite boolean value"); + return true; + } else { + return false; + } + } + /** * Parse text field attributes. In addition to {@link #parseField common attributes} * this will parse analysis and term-vectors related settings. 
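For reference, the mapping syntax that parseNorms accepts after this change can be sketched as follows; this is a minimal illustration, assuming a made-up field named "title" and wrapper class (neither appears in this patch). In 5.x, "norms" is a plain boolean on the field; the pre-5.0 forms "omit_norms": true and "norms": {"enabled": false, "loading": ...} are only parsed for backwards compatibility, with a deprecation warning, and the "loading" key is ignored.

    import java.io.IOException;

    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;

    class NormsMappingSketch {
        // Builds the 5.x-style mapping: "norms" is a plain boolean.
        // Deprecated pre-5.0 equivalents: "omit_norms": true, or
        // "norms": { "enabled": false } (a "loading" key is ignored for bw compat).
        static XContentBuilder mapping() throws IOException {
            return XContentFactory.jsonBuilder()
                .startObject()
                    .startObject("properties")
                        .startObject("title")
                            .field("type", "text")
                            .field("norms", false)
                        .endObject()
                    .endObject()
                .endObject();
        }
    }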
@@ -194,6 +222,14 @@ public static void parseTextField(FieldMapper.Builder builder, String name, Map<String, Object> fieldNode, Mapper.TypeParser.ParserContext parserContext) { parseField(builder, name, fieldNode, parserContext); parseAnalyzersAndTermVectors(builder, name, fieldNode, parserContext); + for (Iterator<Map.Entry<String, Object>> iterator = fieldNode.entrySet().iterator(); iterator.hasNext();) { + Map.Entry<String, Object> entry = iterator.next(); + final String propName = Strings.toUnderscoreCase(entry.getKey()); + final Object propNode = entry.getValue(); + if (parseNorms(builder, propName, propNode, parserContext)) { + iterator.remove(); + } + } } /** @@ -217,24 +253,8 @@ } else if (propName.equals("boost")) { builder.boost(nodeFloatValue(propNode)); iterator.remove(); - } else if (propName.equals("omit_norms")) { - builder.omitNorms(nodeBooleanValue("omit_norms", propNode, parserContext)); - iterator.remove(); - } else if (propName.equals("norms")) { - final Map<String, Object> properties = nodeMapValue(propNode, "norms"); - for (Iterator<Entry<String, Object>> propsIterator = properties.entrySet().iterator(); propsIterator.hasNext();) { - Entry<String, Object> entry2 = propsIterator.next(); - final String propName2 = Strings.toUnderscoreCase(entry2.getKey()); - final Object propNode2 = entry2.getValue(); - if (propName2.equals("enabled")) { - builder.omitNorms(!lenientNodeBooleanValue(propNode2)); - propsIterator.remove(); - } else if (propName2.equals(Loading.KEY)) { - builder.normsLoading(Loading.parse(nodeStringValue(propNode2, null), null)); - propsIterator.remove(); - } - } - DocumentMapperParser.checkNoRemainingFields(propName, properties, parserContext.indexVersionCreated()); + } else if (parserContext.indexVersionCreated().before(Version.V_5_0_0) + && parseNorms(builder, propName, propNode, parserContext)) { iterator.remove(); } else if (propName.equals("index_options")) { builder.indexOptions(nodeIndexOptionValue(propNode)); @@ -256,7 +276,7 @@ (indexVersionCreated.after(Version.V_2_0_1) && indexVersionCreated.before(Version.V_2_1_0))) { throw new MapperParsingException("copy_to in multi fields is not allowed. Found the copy_to in field [" + name + "] which is within a multi field."); } else { - ESLoggerFactory.getLogger("mapping [" + parserContext.type() + "]").warn("Found a copy_to in field [" + name + "] which is within a multi field. This feature has been removed and the copy_to will be removed from the mapping."); + ESLoggerFactory.getLogger("mapping [" + parserContext.type() + "]").warn("Found a copy_to in field [{}] which is within a multi field. 
This feature has been removed and the copy_to will be removed from the mapping.", name); } } else { parseCopyFields(propNode, builder); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java index 97c2fa3933b..7565243251c 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java @@ -305,7 +305,7 @@ public class AllFieldMapper extends MetadataFieldMapper { builder.field("store_term_vector_payloads", fieldType().storeTermVectorPayloads()); } if (includeDefaults || fieldType().omitNorms() != Defaults.FIELD_TYPE.omitNorms()) { - builder.field("omit_norms", fieldType().omitNorms()); + builder.field("norms", !fieldType().omitNorms()); } doXContentAnalyzers(builder, includeDefaults); diff --git a/core/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java b/core/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java index 67ba0aaf1d2..0a0cb9e96d9 100644 --- a/core/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java +++ b/core/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java @@ -31,13 +31,12 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.fielddata.IndexFieldDataService; -import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.mapper.internal.TypeFieldMapper; import org.elasticsearch.index.mapper.internal.UidFieldMapper; @@ -61,7 +60,8 @@ import java.util.concurrent.TimeUnit; */ public final class PercolatorQueriesRegistry extends AbstractIndexShardComponent implements Closeable { - public final static Setting INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING = Setting.boolSetting("index.percolator.map_unmapped_fields_as_string", false, false, Setting.Scope.INDEX); + public final static Setting INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING = + Setting.boolSetting("index.percolator.map_unmapped_fields_as_string", false, Property.IndexScope); private final ConcurrentMap percolateQueries = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency(); private final QueryShardContext queryShardContext; diff --git a/core/src/main/java/org/elasticsearch/index/search/MatchQuery.java b/core/src/main/java/org/elasticsearch/index/search/MatchQuery.java index 979bfba605f..9cd587704cb 100644 --- a/core/src/main/java/org/elasticsearch/index/search/MatchQuery.java +++ b/core/src/main/java/org/elasticsearch/index/search/MatchQuery.java @@ -45,7 +45,6 @@ import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.support.QueryParsers; import java.io.IOException; -import java.util.List; public class MatchQuery { @@ -336,10 +335,10 @@ public class MatchQuery { return prefixQuery; } else if (query instanceof MultiPhraseQuery) { MultiPhraseQuery pq = 
(MultiPhraseQuery)query; - List terms = pq.getTermArrays(); + Term[][] terms = pq.getTermArrays(); int[] positions = pq.getPositions(); - for (int i = 0; i < terms.size(); i++) { - prefixQuery.add(terms.get(i), positions[i]); + for (int i = 0; i < terms.length; i++) { + prefixQuery.add(terms[i], positions[i]); } return prefixQuery; } else if (query instanceof TermQuery) { diff --git a/core/src/main/java/org/elasticsearch/index/shard/ElasticsearchMergePolicy.java b/core/src/main/java/org/elasticsearch/index/shard/ElasticsearchMergePolicy.java index 524266420fb..adae6caf452 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/ElasticsearchMergePolicy.java +++ b/core/src/main/java/org/elasticsearch/index/shard/ElasticsearchMergePolicy.java @@ -49,7 +49,7 @@ import java.util.Map; * be stored as payloads to numeric doc values. */ public final class ElasticsearchMergePolicy extends MergePolicy { - + private static ESLogger logger = Loggers.getLogger(ElasticsearchMergePolicy.class); private final MergePolicy delegate; @@ -69,7 +69,7 @@ public final class ElasticsearchMergePolicy extends MergePolicy { /** Return an "upgraded" view of the reader. */ static CodecReader filter(CodecReader reader) throws IOException { - // TODO: remove 0.90.x/1.x freqs/prox/payloads from _uid? + // TODO: remove 0.90.x/1.x freqs/prox/payloads from _uid? // the previous code never did this, so some indexes carry around trash. return reader; } @@ -155,7 +155,7 @@ public final class ElasticsearchMergePolicy extends MergePolicy { // TODO: Use IndexUpgradeMergePolicy instead. We should be comparing codecs, // for now we just assume every minor upgrade has a new format. - logger.debug("Adding segment " + info.info.name + " to be upgraded"); + logger.debug("Adding segment {} to be upgraded", info.info.name); spec.add(new OneMerge(Collections.singletonList(info))); } @@ -163,14 +163,14 @@ public final class ElasticsearchMergePolicy extends MergePolicy { if (spec.merges.size() == MAX_CONCURRENT_UPGRADE_MERGES) { // hit our max upgrades, so return the spec. we will get a cascaded call to continue. - logger.debug("Returning " + spec.merges.size() + " merges for upgrade"); + logger.debug("Returning {} merges for upgrade", spec.merges.size()); return spec; } } // We must have less than our max upgrade merges, so the next return will be our last in upgrading mode. 
if (spec.merges.isEmpty() == false) { - logger.debug("Returning " + spec.merges.size() + " merges for end of upgrade"); + logger.debug("Returning {} merges for end of upgrade", spec.merges.size()); return spec; } diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 4d35755e159..5e2df030001 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -712,7 +712,7 @@ public class IndexShard extends AbstractIndexShardComponent { false, true, upgrade.upgradeOnlyAncientSegments()); org.apache.lucene.util.Version version = minimumCompatibleVersion(); if (logger.isTraceEnabled()) { - logger.trace("upgraded segment {} from version {} to version {}", previousVersion, version); + logger.trace("upgraded segments for {} from version {} to version {}", shardId, previousVersion, version); } return version; diff --git a/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index e057349223d..d11e6734025 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -128,9 +128,8 @@ final class StoreRecovery { assert shardState != IndexShardState.CREATED && shardState != IndexShardState.RECOVERING : "recovery process of " + shardId + " didn't get to post_recovery. shardState [" + shardState + "]"; if (logger.isTraceEnabled()) { - StringBuilder sb = new StringBuilder(); - sb.append("recovery completed from ").append("shard_store").append(", took [").append(timeValueMillis(recoveryState.getTimer().time())).append("]\n"); RecoveryState.Index index = recoveryState.getIndex(); + StringBuilder sb = new StringBuilder(); sb.append(" index : files [").append(index.totalFileCount()).append("] with total_size [") .append(new ByteSizeValue(index.totalBytes())).append("], took[") .append(TimeValue.timeValueMillis(index.time())).append("]\n"); @@ -142,7 +141,7 @@ final class StoreRecovery { .append(timeValueMillis(recoveryState.getVerifyIndex().checkIndexTime())).append("]\n"); sb.append(" translog : number_of_operations [").append(recoveryState.getTranslog().recoveredOperations()) .append("], took [").append(TimeValue.timeValueMillis(recoveryState.getTranslog().time())).append("]"); - logger.trace(sb.toString()); + logger.trace("recovery completed from [shard_store], took [{}]\n{}", timeValueMillis(recoveryState.getTimer().time()), sb); } else if (logger.isDebugEnabled()) { logger.debug("recovery completed from [shard_store], took [{}]", timeValueMillis(recoveryState.getTimer().time())); } diff --git a/core/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java b/core/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java index 06bc6a84a88..933fd784588 100644 --- a/core/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java +++ b/core/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java @@ -36,7 +36,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.util.set.Sets; import 
org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; @@ -62,7 +62,7 @@ public class FsDirectoryService extends DirectoryService implements StoreRateLim default: throw new IllegalArgumentException("unrecognized [index.store.fs.fs_lock] \"" + s + "\": must be native or simple"); } - }, false, Setting.Scope.INDEX); + }, Property.IndexScope); private final CounterMetric rateLimitingTimeInNanos = new CounterMetric(); private final ShardPath path; diff --git a/core/src/main/java/org/elasticsearch/index/store/IndexStore.java b/core/src/main/java/org/elasticsearch/index/store/IndexStore.java index e98ad7cc6eb..9e01d871765 100644 --- a/core/src/main/java/org/elasticsearch/index/store/IndexStore.java +++ b/core/src/main/java/org/elasticsearch/index/store/IndexStore.java @@ -21,6 +21,7 @@ package org.elasticsearch.index.store; import org.apache.lucene.store.StoreRateLimiting; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.IndexSettings; @@ -29,8 +30,12 @@ import org.elasticsearch.index.shard.ShardPath; * */ public class IndexStore extends AbstractIndexComponent { - public static final Setting INDEX_STORE_THROTTLE_TYPE_SETTING = new Setting<>("index.store.throttle.type", "none", IndexRateLimitingType::fromString, true, Setting.Scope.INDEX) ; - public static final Setting INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING = Setting.byteSizeSetting("index.store.throttle.max_bytes_per_sec", new ByteSizeValue(0), true, Setting.Scope.INDEX); + public static final Setting INDEX_STORE_THROTTLE_TYPE_SETTING = + new Setting<>("index.store.throttle.type", "none", IndexRateLimitingType::fromString, + Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING = + Setting.byteSizeSetting("index.store.throttle.max_bytes_per_sec", new ByteSizeValue(0), + Property.Dynamic, Property.IndexScope); protected final IndexStoreConfig indexStoreConfig; private final StoreRateLimiting rateLimiting = new StoreRateLimiting(); diff --git a/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java b/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java index ab7075afa5b..12558bb9554 100644 --- a/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java +++ b/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java @@ -22,6 +22,7 @@ import org.apache.lucene.store.StoreRateLimiting; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; @@ -36,11 +37,15 @@ public class IndexStoreConfig { /** * Configures the node / cluster level throttle type. See {@link StoreRateLimiting.Type}. 
*/ - public static final Setting INDICES_STORE_THROTTLE_TYPE_SETTING = new Setting<>("indices.store.throttle.type", StoreRateLimiting.Type.NONE.name(),StoreRateLimiting.Type::fromString, true, Setting.Scope.CLUSTER); + public static final Setting INDICES_STORE_THROTTLE_TYPE_SETTING = + new Setting<>("indices.store.throttle.type", StoreRateLimiting.Type.NONE.name(),StoreRateLimiting.Type::fromString, + Property.Dynamic, Property.NodeScope); /** * Configures the node / cluster level throttle intensity. The default is 10240 MB */ - public static final Setting INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING = Setting.byteSizeSetting("indices.store.throttle.max_bytes_per_sec", new ByteSizeValue(0), true, Setting.Scope.CLUSTER); + public static final Setting INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING = + Setting.byteSizeSetting("indices.store.throttle.max_bytes_per_sec", new ByteSizeValue(0), + Property.Dynamic, Property.NodeScope); private volatile StoreRateLimiting.Type rateLimitingType; private volatile ByteSizeValue rateLimitingThrottle; private final StoreRateLimiting rateLimiting = new StoreRateLimiting(); diff --git a/core/src/main/java/org/elasticsearch/index/store/Store.java b/core/src/main/java/org/elasticsearch/index/store/Store.java index 77e7f32f5f5..e0ed3bc98b7 100644 --- a/core/src/main/java/org/elasticsearch/index/store/Store.java +++ b/core/src/main/java/org/elasticsearch/index/store/Store.java @@ -49,7 +49,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; @@ -61,6 +60,7 @@ import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.store.ByteArrayIndexInput; import org.elasticsearch.common.lucene.store.InputStreamIndexInput; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.Callback; @@ -90,7 +90,6 @@ import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.zip.Adler32; import java.util.zip.CRC32; import java.util.zip.Checksum; @@ -124,7 +123,8 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref static final int VERSION_START = 0; static final int VERSION = VERSION_WRITE_THROWABLE; static final String CORRUPTED = "corrupted_"; - public static final Setting INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING = Setting.timeSetting("index.store.stats_refresh_interval", TimeValue.timeValueSeconds(10), false, Setting.Scope.INDEX); + public static final Setting INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING = + Setting.timeSetting("index.store.stats_refresh_interval", TimeValue.timeValueSeconds(10), Property.IndexScope); private final AtomicBoolean isClosed = new AtomicBoolean(false); private final StoreDirectory directory; @@ -379,7 +379,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref if (isClosed.compareAndSet(false, true)) { // only do this once! 
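The recurring pattern in these settings hunks is the move from positional constructor flags (a default value, a "dynamic" boolean, and a Scope enum) to a trailing varargs list of Property values, so a declaration reads as a set of traits. A stripped-down sketch of that API shape (this is not the actual org.elasticsearch.common.settings.Setting class; SettingSketch and its fields are illustrative):

    import java.util.Arrays;
    import java.util.EnumSet;

    public class SettingSketch<T> {
        enum Property { Dynamic, NodeScope, IndexScope }

        final String key;
        final T defaultValue;
        final EnumSet<Property> properties;

        SettingSketch(String key, T defaultValue, Property... properties) {
            this.key = key;
            this.defaultValue = defaultValue;
            this.properties = properties.length == 0
                ? EnumSet.noneOf(Property.class)
                : EnumSet.copyOf(Arrays.asList(properties));
        }

        boolean isDynamic() { return properties.contains(Property.Dynamic); }

        public static void main(String[] args) {
            // Mirrors INDICES_STORE_THROTTLE_TYPE_SETTING above: dynamic, node-scoped.
            SettingSketch<String> throttleType = new SettingSketch<>(
                "indices.store.throttle.type", "none",
                Property.Dynamic, Property.NodeScope);
            System.out.println(throttleType.key + " dynamic=" + throttleType.isDynamic());
        }
    }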
decRef(); - logger.debug("store reference count on close: " + refCounter.refCount()); + logger.debug("store reference count on close: {}", refCounter.refCount()); } } diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java b/core/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java index 926ff482248..bd01e7f0183 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java @@ -30,10 +30,9 @@ import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.ShardCoreKeyMap; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Scope; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.cache.query.QueryCacheStats; @@ -50,9 +49,9 @@ import java.util.concurrent.ConcurrentHashMap; public class IndicesQueryCache extends AbstractComponent implements QueryCache, Closeable { public static final Setting INDICES_CACHE_QUERY_SIZE_SETTING = Setting.byteSizeSetting( - "indices.queries.cache.size", "10%", false, Scope.CLUSTER); + "indices.queries.cache.size", "10%", Property.NodeScope); public static final Setting INDICES_CACHE_QUERY_COUNT_SETTING = Setting.intSetting( - "indices.queries.cache.count", 10000, 1, false, Scope.CLUSTER); + "indices.queries.cache.count", 10000, 1, Property.NodeScope); private final LRUQueryCache cache; private final ShardCoreKeyMap shardKeyMap = new ShardCoreKeyMap(); diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java b/core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java index 32b5f55b369..9129a3b1360 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java @@ -34,6 +34,7 @@ import org.elasticsearch.common.cache.RemovalNotification; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; @@ -68,12 +69,12 @@ public final class IndicesRequestCache extends AbstractComponent implements Remo * A setting to enable or disable request caching on an index level. Its dynamic by default * since we are checking on the cluster state IndexMetaData always. 
*/ - public static final Setting INDEX_CACHE_REQUEST_ENABLED_SETTING = Setting.boolSetting("index.requests.cache.enable", - false, true, Setting.Scope.INDEX); - public static final Setting INDICES_CACHE_QUERY_SIZE = Setting.byteSizeSetting("indices.requests.cache.size", "1%", - false, Setting.Scope.CLUSTER); - public static final Setting INDICES_CACHE_QUERY_EXPIRE = Setting.positiveTimeSetting("indices.requests.cache.expire", - new TimeValue(0), false, Setting.Scope.CLUSTER); + public static final Setting INDEX_CACHE_REQUEST_ENABLED_SETTING = + Setting.boolSetting("index.requests.cache.enable", false, Property.Dynamic, Property.IndexScope); + public static final Setting INDICES_CACHE_QUERY_SIZE = + Setting.byteSizeSetting("indices.requests.cache.size", "1%", Property.NodeScope); + public static final Setting INDICES_CACHE_QUERY_EXPIRE = + Setting.positiveTimeSetting("indices.requests.cache.expire", new TimeValue(0), Property.NodeScope); private final ConcurrentMap registeredClosedListeners = ConcurrentCollections.newConcurrentMap(); private final Set keysToClean = ConcurrentCollections.newConcurrentSet(); diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesService.java b/core/src/main/java/org/elasticsearch/indices/IndicesService.java index 6fd833471ed..0c79f7d701d 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -47,6 +47,7 @@ import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsExecutors; @@ -116,7 +117,8 @@ import static org.elasticsearch.common.util.CollectionUtils.arrayAsArrayList; public class IndicesService extends AbstractLifecycleComponent implements Iterable, IndexService.ShardStoreDeleter { public static final String INDICES_SHARDS_CLOSED_TIMEOUT = "indices.shards_closed_timeout"; - public static final Setting INDICES_CACHE_CLEAN_INTERVAL_SETTING = Setting.positiveTimeSetting("indices.cache.cleanup_interval", TimeValue.timeValueMinutes(1), false, Setting.Scope.CLUSTER); + public static final Setting INDICES_CACHE_CLEAN_INTERVAL_SETTING = + Setting.positiveTimeSetting("indices.cache.cleanup_interval", TimeValue.timeValueMinutes(1), Property.NodeScope); private final PluginsService pluginsService; private final NodeEnvironment nodeEnv; private final TimeValue shardsClosedTimeout; @@ -193,7 +195,7 @@ public class IndicesService extends AbstractLifecycleComponent i try { removeIndex(index, "shutdown", false); } catch (Throwable e) { - logger.warn("failed to remove index on stop " + index + "", e); + logger.warn("failed to remove index on stop [{}]", e, index); } finally { latch.countDown(); } diff --git a/core/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java b/core/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java index 5d2fb761842..75c15f09778 100644 --- a/core/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java +++ b/core/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java @@ -26,6 +26,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.component.AbstractComponent; import 
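The IndicesRequestCache hunk keeps index.requests.cache.enable both Dynamic and IndexScope, while the node-level cache sizes are NodeScope only. What "Dynamic" buys is that registered consumers are re-invoked when an updated value arrives via the cluster state. A simplified sketch of that update-consumer pattern (ES wires this through ClusterSettings/IndexScopedSettings; the class below is illustrative only):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.Consumer;

    public class DynamicSettingSketch {
        private volatile boolean requestCacheEnabled = false; // default for index.requests.cache.enable
        private final List<Consumer<Boolean>> consumers = new ArrayList<>();

        void addUpdateConsumer(Consumer<Boolean> consumer) {
            consumers.add(consumer);
        }

        // Called when an updated index setting arrives via the cluster state.
        void applyUpdate(boolean newValue) {
            requestCacheEnabled = newValue;
            for (Consumer<Boolean> c : consumers) {
                c.accept(newValue);
            }
        }

        public static void main(String[] args) {
            DynamicSettingSketch settings = new DynamicSettingSketch();
            settings.addUpdateConsumer(enabled ->
                System.out.println("request cache now " + (enabled ? "enabled" : "disabled")));
            settings.applyUpdate(true); // e.g. from an update-settings API call
        }
    }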
org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; @@ -73,9 +74,12 @@ import java.util.function.Function; */ public class HunspellService extends AbstractComponent { - public final static Setting HUNSPELL_LAZY_LOAD = Setting.boolSetting("indices.analysis.hunspell.dictionary.lazy", Boolean.FALSE, false, Setting.Scope.CLUSTER); - public final static Setting HUNSPELL_IGNORE_CASE = Setting.boolSetting("indices.analysis.hunspell.dictionary.ignore_case", Boolean.FALSE, false, Setting.Scope.CLUSTER); - public final static Setting HUNSPELL_DICTIONARY_OPTIONS = Setting.groupSetting("indices.analysis.hunspell.dictionary.", false, Setting.Scope.CLUSTER); + public final static Setting HUNSPELL_LAZY_LOAD = + Setting.boolSetting("indices.analysis.hunspell.dictionary.lazy", Boolean.FALSE, Property.NodeScope); + public final static Setting HUNSPELL_IGNORE_CASE = + Setting.boolSetting("indices.analysis.hunspell.dictionary.ignore_case", Boolean.FALSE, Property.NodeScope); + public final static Setting HUNSPELL_DICTIONARY_OPTIONS = + Setting.groupSetting("indices.analysis.hunspell.dictionary.", Property.NodeScope); private final ConcurrentHashMap dictionaries = new ConcurrentHashMap<>(); private final Map knownDictionaries; private final boolean defaultIgnoreCase; diff --git a/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java b/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java index 0e1532bc6b3..d2d96092186 100644 --- a/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java +++ b/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; @@ -46,15 +47,22 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService { private final ConcurrentMap breakers = new ConcurrentHashMap(); - public static final Setting TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING = Setting.byteSizeSetting("indices.breaker.total.limit", "70%", true, Setting.Scope.CLUSTER); + public static final Setting TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING = + Setting.byteSizeSetting("indices.breaker.total.limit", "70%", Property.Dynamic, Property.NodeScope); - public static final Setting FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING = Setting.byteSizeSetting("indices.breaker.fielddata.limit", "60%", true, Setting.Scope.CLUSTER); - public static final Setting FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING = Setting.doubleSetting("indices.breaker.fielddata.overhead", 1.03d, 0.0d, true, Setting.Scope.CLUSTER); - public static final Setting FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING = new Setting<>("indices.breaker.fielddata.type", "memory", CircuitBreaker.Type::parseValue, false, Setting.Scope.CLUSTER); + public static final Setting FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING = + Setting.byteSizeSetting("indices.breaker.fielddata.limit", "60%", Property.Dynamic, Property.NodeScope); + public static final Setting FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING = + 
Setting.doubleSetting("indices.breaker.fielddata.overhead", 1.03d, 0.0d, Property.Dynamic, Property.NodeScope); + public static final Setting FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING = + new Setting<>("indices.breaker.fielddata.type", "memory", CircuitBreaker.Type::parseValue, Property.NodeScope); - public static final Setting REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING = Setting.byteSizeSetting("indices.breaker.request.limit", "40%", true, Setting.Scope.CLUSTER); - public static final Setting REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING = Setting.doubleSetting("indices.breaker.request.overhead", 1.0d, 0.0d, true, Setting.Scope.CLUSTER); - public static final Setting REQUEST_CIRCUIT_BREAKER_TYPE_SETTING = new Setting<>("indices.breaker.request.type", "memory", CircuitBreaker.Type::parseValue, false, Setting.Scope.CLUSTER); + public static final Setting REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING = + Setting.byteSizeSetting("indices.breaker.request.limit", "40%", Property.Dynamic, Property.NodeScope); + public static final Setting REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING = + Setting.doubleSetting("indices.breaker.request.overhead", 1.0d, 0.0d, Property.Dynamic, Property.NodeScope); + public static final Setting REQUEST_CIRCUIT_BREAKER_TYPE_SETTING = + new Setting<>("indices.breaker.request.type", "memory", CircuitBreaker.Type::parseValue, Property.NodeScope); diff --git a/core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java b/core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java index 0a3f063dfcc..46744f4d848 100644 --- a/core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java +++ b/core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java @@ -34,6 +34,7 @@ import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.Index; @@ -52,7 +53,8 @@ import java.util.function.ToLongBiFunction; */ public class IndicesFieldDataCache extends AbstractComponent implements RemovalListener, Releasable{ - public static final Setting INDICES_FIELDDATA_CACHE_SIZE_KEY = Setting.byteSizeSetting("indices.fielddata.cache.size", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER); + public static final Setting INDICES_FIELDDATA_CACHE_SIZE_KEY = + Setting.byteSizeSetting("indices.fielddata.cache.size", new ByteSizeValue(-1), Property.NodeScope); private final IndexFieldDataCache.Listener indicesFieldDataCacheListener; private final Cache cache; diff --git a/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java index 6eb7c88a2a4..f3937fc0b66 100644 --- a/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java +++ b/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java @@ -309,7 +309,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL } final Engine.CommitId expectedCommitId = expectedCommitIds.get(shard.currentNodeId()); if (expectedCommitId == null) { - logger.trace("{} can't resolve expected commit id for {}, skipping for sync id [{}]. 
shard routing {}", shardId, syncId, shard); + logger.trace("{} can't resolve expected commit id for current node, skipping for sync id [{}]. shard routing {}", shardId, syncId, shard); results.put(shard, new ShardSyncedFlushResponse("no commit id from pre-sync flush")); contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); continue; diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java index 24f87ee436f..8494939e46d 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java @@ -238,7 +238,7 @@ public class RecoveriesCollection { return; } lastSeenAccessTime = accessTime; - logger.trace("[monitor] rescheduling check for [{}]. last access time is [{}]", lastSeenAccessTime); + logger.trace("[monitor] rescheduling check for [{}]. last access time is [{}]", recoveryId, lastSeenAccessTime); threadPool.schedule(checkInterval, ThreadPool.Names.GENERIC, this); } } diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java index 8d610dce05b..82595458479 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -32,31 +33,45 @@ import org.elasticsearch.common.unit.TimeValue; public class RecoverySettings extends AbstractComponent { - public static final Setting INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING = Setting.byteSizeSetting("indices.recovery.max_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB), true, Setting.Scope.CLUSTER); + public static final Setting INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING = + Setting.byteSizeSetting("indices.recovery.max_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB), + Property.Dynamic, Property.NodeScope); /** * how long to wait before retrying after issues cause by cluster state syncing between nodes * i.e., local node is not yet known on remote node, remote shard not yet started etc. 
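The RecoveriesCollection fix above adds the missing recoveryId argument to a trace call inside a monitor that reschedules itself. For context, that monitor follows a common watchdog pattern: each run compares the last observed access time with the current one and either fails the recovery (no progress) or schedules the next check. A self-contained sketch under those assumptions (the ES version runs on its ThreadPool; the id and interval below are illustrative):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicLong;

    public class RecoveryMonitorSketch implements Runnable {
        private final AtomicLong lastAccessTime = new AtomicLong(System.nanoTime());
        private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        private volatile long lastSeenAccessTime = -1;
        private final long recoveryId = 42; // illustrative id, like the one the fix adds to the log line

        @Override
        public void run() {
            long accessTime = lastAccessTime.get();
            if (accessTime == lastSeenAccessTime) {
                // no progress since the previous check: give up on this recovery
                System.out.println("no activity for recovery [" + recoveryId + "], failing it");
                scheduler.shutdown();
                return;
            }
            lastSeenAccessTime = accessTime;
            System.out.println("[monitor] rescheduling check for [" + recoveryId
                + "]. last access time is [" + lastSeenAccessTime + "]");
            scheduler.schedule(this, 100, TimeUnit.MILLISECONDS);
        }

        public static void main(String[] args) throws InterruptedException {
            RecoveryMonitorSketch monitor = new RecoveryMonitorSketch();
            monitor.scheduler.schedule(monitor, 100, TimeUnit.MILLISECONDS);
            Thread.sleep(500); // first run reschedules; second sees no progress and stops
        }
    }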
*/ - public static final Setting INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING = Setting.positiveTimeSetting("indices.recovery.retry_delay_state_sync", TimeValue.timeValueMillis(500), true, Setting.Scope.CLUSTER); + public static final Setting INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING = + Setting.positiveTimeSetting("indices.recovery.retry_delay_state_sync", TimeValue.timeValueMillis(500), + Property.Dynamic, Property.NodeScope); /** how long to wait before retrying after network related issues */ - public static final Setting INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING = Setting.positiveTimeSetting("indices.recovery.retry_delay_network", TimeValue.timeValueSeconds(5), true, Setting.Scope.CLUSTER); + public static final Setting INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING = + Setting.positiveTimeSetting("indices.recovery.retry_delay_network", TimeValue.timeValueSeconds(5), + Property.Dynamic, Property.NodeScope); /** timeout value to use for requests made as part of the recovery process */ - public static final Setting INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING = Setting.positiveTimeSetting("indices.recovery.internal_action_timeout", TimeValue.timeValueMinutes(15), true, Setting.Scope.CLUSTER); + public static final Setting INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING = + Setting.positiveTimeSetting("indices.recovery.internal_action_timeout", TimeValue.timeValueMinutes(15), + Property.Dynamic, Property.NodeScope); /** * timeout value to use for requests made as part of the recovery process that are expected to take long time. * defaults to twice `indices.recovery.internal_action_timeout`. */ - public static final Setting INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING = Setting.timeSetting("indices.recovery.internal_action_long_timeout", (s) -> TimeValue.timeValueMillis(INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.get(s).millis() * 2).toString(), TimeValue.timeValueSeconds(0), true, Setting.Scope.CLUSTER); + public static final Setting INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING = + Setting.timeSetting("indices.recovery.internal_action_long_timeout", + (s) -> TimeValue.timeValueMillis(INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.get(s).millis() * 2).toString(), + TimeValue.timeValueSeconds(0), Property.Dynamic, Property.NodeScope); /** * recoveries that don't show any activity for more then this interval will be failed. 
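One detail worth noticing in the RecoverySettings hunks: indices.recovery.internal_action_long_timeout has a derived default, computed from the already-resolved base timeout rather than hard-coded. A minimal sketch of that idiom, with settings reduced to a plain map of millisecond values (the helper and keys are real setting names, but the resolution code is illustrative):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.function.Function;

    public class DerivedDefaultSketch {
        // Returns the explicit value if present, otherwise a default computed
        // from the other settings.
        static long resolveMillis(Map<String, Long> settings, String key,
                                  Function<Map<String, Long>, Long> defaultValue) {
            Long explicit = settings.get(key);
            return explicit != null ? explicit : defaultValue.apply(settings);
        }

        public static void main(String[] args) {
            Map<String, Long> settings = new HashMap<>();
            settings.put("indices.recovery.internal_action_timeout", 900_000L); // 15 minutes

            long longTimeout = resolveMillis(settings,
                "indices.recovery.internal_action_long_timeout",
                s -> s.getOrDefault("indices.recovery.internal_action_timeout", 900_000L) * 2);

            System.out.println("long timeout = " + longTimeout + "ms"); // 1,800,000 = 30 minutes
        }
    }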
* defaults to `indices.recovery.internal_action_long_timeout` */ - public static final Setting INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING = Setting.timeSetting("indices.recovery.recovery_activity_timeout", (s) -> INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING.getRaw(s) , TimeValue.timeValueSeconds(0), true, Setting.Scope.CLUSTER); + public static final Setting INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING = + Setting.timeSetting("indices.recovery.recovery_activity_timeout", + (s) -> INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING.getRaw(s) , TimeValue.timeValueSeconds(0), + Property.Dynamic, Property.NodeScope); public static final ByteSizeValue DEFAULT_CHUNK_SIZE = new ByteSizeValue(512, ByteSizeUnit.KB); diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 15b9b59dd28..b609eb5d08a 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -289,7 +289,7 @@ public class RecoverySourceHandler { RemoteTransportException exception = new RemoteTransportException("File corruption occurred on recovery but " + "checksums are ok", null); exception.addSuppressed(targetException); - logger.warn("{} Remote file corruption during finalization on node {}, recovering {}. local checksum OK", + logger.warn("{} Remote file corruption during finalization of recovery on node {}. local checksum OK", corruptIndexException, shard.shardId(), request.targetNode()); throw exception; } else { diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetService.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetService.java index dcbb0c7bedf..ab8c87cd636 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetService.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetService.java @@ -218,7 +218,7 @@ public class RecoveryTargetService extends AbstractComponent implements IndexEve "operations") .append(", took [").append(timeValueMillis(recoveryResponse.phase2Time)).append("]") .append("\n"); - logger.trace(sb.toString()); + logger.trace("{}", sb); } else { logger.debug("{} recovery done from [{}], took [{}]", request.shardId(), recoveryTarget.sourceNode(), recoveryTime); } diff --git a/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java b/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java index 6e9859efb2e..6c09a608c2d 100644 --- a/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java +++ b/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java @@ -36,6 +36,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; @@ -57,7 +58,6 @@ import org.elasticsearch.transport.TransportService; import java.io.Closeable; import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; import java.util.EnumSet; import java.util.List; import java.util.concurrent.TimeUnit; @@ -69,7 +69,9 @@ import 
java.util.concurrent.atomic.AtomicInteger; public class IndicesStore extends AbstractComponent implements ClusterStateListener, Closeable { // TODO this class can be foled into either IndicesService and partially into IndicesClusterStateService there is no need for a separate public service - public static final Setting INDICES_STORE_DELETE_SHARD_TIMEOUT = Setting.positiveTimeSetting("indices.store.delete.shard.timeout", new TimeValue(30, TimeUnit.SECONDS), false, Setting.Scope.CLUSTER); + public static final Setting INDICES_STORE_DELETE_SHARD_TIMEOUT = + Setting.positiveTimeSetting("indices.store.delete.shard.timeout", new TimeValue(30, TimeUnit.SECONDS), + Property.NodeScope); public static final String ACTION_SHARD_EXISTS = "internal:index/shard/exists"; private static final EnumSet ACTIVE_STATES = EnumSet.of(IndexShardState.STARTED, IndexShardState.RELOCATED); private final IndicesService indicesService; diff --git a/core/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java b/core/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java index e4537b876fa..8576a3019af 100644 --- a/core/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java +++ b/core/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java @@ -38,6 +38,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsExecutors; @@ -67,7 +68,9 @@ import java.util.concurrent.locks.ReentrantLock; */ public class IndicesTTLService extends AbstractLifecycleComponent { - public static final Setting INDICES_TTL_INTERVAL_SETTING = Setting.positiveTimeSetting("indices.ttl.interval", TimeValue.timeValueSeconds(60), true, Setting.Scope.CLUSTER); + public static final Setting INDICES_TTL_INTERVAL_SETTING = + Setting.positiveTimeSetting("indices.ttl.interval", TimeValue.timeValueSeconds(60), + Property.Dynamic, Property.NodeScope); private final ClusterService clusterService; private final IndicesService indicesService; @@ -287,7 +290,7 @@ public class IndicesTTLService extends AbstractLifecycleComponent fsStatsCache; public final static Setting REFRESH_INTERVAL_SETTING = - Setting.timeSetting("monitor.fs.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), false, Setting.Scope.CLUSTER); + Setting.timeSetting("monitor.fs.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), + Property.NodeScope); public FsService(Settings settings, NodeEnvironment nodeEnvironment) throws IOException { super(settings); diff --git a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmGcMonitorService.java b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmGcMonitorService.java index 97c813a0fe3..5a2d591c7dc 100644 --- a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmGcMonitorService.java +++ b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmGcMonitorService.java @@ -21,7 +21,7 @@ package org.elasticsearch.monitor.jvm; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Scope; +import org.elasticsearch.common.settings.Setting.Property; import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.FutureUtils; @@ -47,12 +47,14 @@ public class JvmGcMonitorService extends AbstractLifecycleComponent ENABLED_SETTING = Setting.boolSetting("monitor.jvm.gc.enabled", true, false, Scope.CLUSTER); + public final static Setting ENABLED_SETTING = + Setting.boolSetting("monitor.jvm.gc.enabled", true, Property.NodeScope); public final static Setting REFRESH_INTERVAL_SETTING = - Setting.timeSetting("monitor.jvm.gc.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), false, Scope.CLUSTER); + Setting.timeSetting("monitor.jvm.gc.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), + Property.NodeScope); private static String GC_COLLECTOR_PREFIX = "monitor.jvm.gc.collector."; - public final static Setting GC_SETTING = Setting.groupSetting(GC_COLLECTOR_PREFIX, false, Scope.CLUSTER); + public final static Setting GC_SETTING = Setting.groupSetting(GC_COLLECTOR_PREFIX, Property.NodeScope); static class GcThreshold { public final String name; diff --git a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmService.java b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmService.java index fbec6cda168..e91c05e75ac 100644 --- a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmService.java +++ b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmService.java @@ -21,6 +21,7 @@ package org.elasticsearch.monitor.jvm; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -36,7 +37,8 @@ public class JvmService extends AbstractComponent { private JvmStats jvmStats; public final static Setting REFRESH_INTERVAL_SETTING = - Setting.timeSetting("monitor.jvm.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), false, Setting.Scope.CLUSTER); + Setting.timeSetting("monitor.jvm.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), + Property.NodeScope); public JvmService(Settings settings) { super(settings); diff --git a/core/src/main/java/org/elasticsearch/monitor/os/OsService.java b/core/src/main/java/org/elasticsearch/monitor/os/OsService.java index 5f836c6f928..d452094d7b0 100644 --- a/core/src/main/java/org/elasticsearch/monitor/os/OsService.java +++ b/core/src/main/java/org/elasticsearch/monitor/os/OsService.java @@ -21,6 +21,7 @@ package org.elasticsearch.monitor.os; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.SingleObjectCache; @@ -38,7 +39,8 @@ public class OsService extends AbstractComponent { private SingleObjectCache osStatsCache; public final static Setting REFRESH_INTERVAL_SETTING = - Setting.timeSetting("monitor.os.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), false, Setting.Scope.CLUSTER); + Setting.timeSetting("monitor.os.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), + Property.NodeScope); public OsService(Settings settings) { super(settings); diff --git 
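The JvmGcMonitorService hunk converts a group setting, monitor.jvm.gc.collector., which collects every key under a prefix into one nested view rather than declaring each collector up front. A simplified sketch of what such a grouping does, with settings as a flat string map (ES returns a Settings object keyed the same way; the map-based version below is illustrative):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.TreeMap;

    public class GroupSettingSketch {
        // Gathers every key under the prefix, keyed by the remainder of the path.
        static Map<String, String> group(Map<String, String> settings, String prefix) {
            Map<String, String> grouped = new TreeMap<>();
            for (Map.Entry<String, String> e : settings.entrySet()) {
                if (e.getKey().startsWith(prefix)) {
                    grouped.put(e.getKey().substring(prefix.length()), e.getValue());
                }
            }
            return grouped;
        }

        public static void main(String[] args) {
            Map<String, String> settings = new HashMap<>();
            settings.put("monitor.jvm.gc.collector.young.warn", "1000ms");
            settings.put("monitor.jvm.gc.collector.old.warn", "10s");
            settings.put("monitor.jvm.refresh_interval", "1s"); // not part of the group
            System.out.println(group(settings, "monitor.jvm.gc.collector."));
            // prints {old.warn=10s, young.warn=1000ms}
        }
    }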
a/core/src/main/java/org/elasticsearch/monitor/process/ProcessService.java b/core/src/main/java/org/elasticsearch/monitor/process/ProcessService.java index 9e3283af4fc..30c24f34c66 100644 --- a/core/src/main/java/org/elasticsearch/monitor/process/ProcessService.java +++ b/core/src/main/java/org/elasticsearch/monitor/process/ProcessService.java @@ -21,6 +21,7 @@ package org.elasticsearch.monitor.process; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.SingleObjectCache; @@ -35,7 +36,8 @@ public final class ProcessService extends AbstractComponent { private final SingleObjectCache processStatsCache; public final static Setting REFRESH_INTERVAL_SETTING = - Setting.timeSetting("monitor.process.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), false, Setting.Scope.CLUSTER); + Setting.timeSetting("monitor.process.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), + Property.NodeScope); public ProcessService(Settings settings) { super(settings); diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index b995723127a..3388fb1e2c0 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ b/core/src/main/java/org/elasticsearch/node/Node.java @@ -55,6 +55,7 @@ import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.common.transport.BoundTransportAddress; @@ -131,17 +132,23 @@ import static org.elasticsearch.common.settings.Settings.settingsBuilder; */ public class Node implements Closeable { - public static final Setting WRITE_PORTS_FIELD_SETTING = Setting.boolSetting("node.portsfile", false, false, Setting.Scope.CLUSTER); - public static final Setting NODE_CLIENT_SETTING = Setting.boolSetting("node.client", false, false, Setting.Scope.CLUSTER); - public static final Setting NODE_DATA_SETTING = Setting.boolSetting("node.data", true, false, Setting.Scope.CLUSTER); - public static final Setting NODE_MASTER_SETTING = Setting.boolSetting("node.master", true, false, Setting.Scope.CLUSTER); - public static final Setting NODE_LOCAL_SETTING = Setting.boolSetting("node.local", false, false, Setting.Scope.CLUSTER); - public static final Setting NODE_MODE_SETTING = new Setting<>("node.mode", "network", Function.identity(), false, Setting.Scope.CLUSTER); - public static final Setting NODE_INGEST_SETTING = Setting.boolSetting("node.ingest", true, false, Setting.Scope.CLUSTER); - public static final Setting NODE_NAME_SETTING = Setting.simpleString("node.name", false, Setting.Scope.CLUSTER); + public static final Setting WRITE_PORTS_FIELD_SETTING = + Setting.boolSetting("node.portsfile", false, Property.NodeScope); + public static final Setting NODE_CLIENT_SETTING = + Setting.boolSetting("node.client", false, Property.NodeScope); + public static final Setting NODE_DATA_SETTING = Setting.boolSetting("node.data", true, Property.NodeScope); + public static final Setting 
NODE_MASTER_SETTING = + Setting.boolSetting("node.master", true, Property.NodeScope); + public static final Setting NODE_LOCAL_SETTING = + Setting.boolSetting("node.local", false, Property.NodeScope); + public static final Setting NODE_MODE_SETTING = + new Setting<>("node.mode", "network", Function.identity(), Property.NodeScope); + public static final Setting NODE_INGEST_SETTING = + Setting.boolSetting("node.ingest", true, Property.NodeScope); + public static final Setting NODE_NAME_SETTING = Setting.simpleString("node.name", Property.NodeScope); // this sucks that folks can mistype client etc and get away with it. // TODO: we should move this to node.attribute.${name} = ${value} instead. - public static final Setting NODE_ATTRIBUTES = Setting.groupSetting("node.", false, Setting.Scope.CLUSTER); + public static final Setting NODE_ATTRIBUTES = Setting.groupSetting("node.", Property.NodeScope); private static final String CLIENT_TYPE = "node"; diff --git a/core/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java b/core/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java index faf449586c1..b1ad3a3239f 100644 --- a/core/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java +++ b/core/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java @@ -23,9 +23,10 @@ import org.elasticsearch.bootstrap.BootstrapInfo; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.cli.Terminal; +import org.elasticsearch.cli.Terminal; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.env.Environment; @@ -57,7 +58,8 @@ public class InternalSettingsPreparer { public static final String SECRET_PROMPT_VALUE = "${prompt.secret}"; public static final String TEXT_PROMPT_VALUE = "${prompt.text}"; - public static final Setting IGNORE_SYSTEM_PROPERTIES_SETTING = Setting.boolSetting("config.ignore_system_properties", false, false, Setting.Scope.CLUSTER); + public static final Setting IGNORE_SYSTEM_PROPERTIES_SETTING = + Setting.boolSetting("config.ignore_system_properties", false, Property.NodeScope); /** * Prepares the settings by gathering all elasticsearch system properties and setting defaults. 
diff --git a/core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java b/core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java index 767f6d42179..e72eb2100f6 100644 --- a/core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java +++ b/core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java @@ -19,16 +19,18 @@ package org.elasticsearch.plugins; +import joptsimple.OptionSet; +import joptsimple.OptionSpec; import org.apache.lucene.util.IOUtils; import org.elasticsearch.Build; import org.elasticsearch.Version; import org.elasticsearch.bootstrap.JarHell; -import org.elasticsearch.common.cli.CliTool; -import org.elasticsearch.common.cli.Terminal; -import org.elasticsearch.common.cli.UserError; +import org.elasticsearch.cli.Command; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserError; import org.elasticsearch.common.hash.MessageDigests; import org.elasticsearch.common.io.FileSystemUtils; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import java.io.BufferedReader; @@ -48,6 +50,7 @@ import java.nio.file.attribute.PosixFilePermission; import java.util.ArrayList; import java.util.Arrays; import java.util.HashSet; +import java.util.LinkedHashSet; import java.util.List; import java.util.Locale; import java.util.Set; @@ -55,7 +58,7 @@ import java.util.zip.ZipEntry; import java.util.zip.ZipInputStream; import static java.util.Collections.unmodifiableSet; -import static org.elasticsearch.common.cli.Terminal.Verbosity.VERBOSE; +import static org.elasticsearch.cli.Terminal.Verbosity.VERBOSE; import static org.elasticsearch.common.util.set.Sets.newHashSet; /** @@ -88,7 +91,7 @@ import static org.elasticsearch.common.util.set.Sets.newHashSet; * elasticsearch config directory, using the name of the plugin. If any files to be installed * already exist, they will be skipped. 
*/ -class InstallPluginCommand extends CliTool.Command { +class InstallPluginCommand extends Command { private static final String PROPERTY_SUPPORT_STAGING_URLS = "es.plugins.staging"; @@ -98,7 +101,7 @@ class InstallPluginCommand extends CliTool.Command { "lang-groovy")); // TODO: make this a resource file generated by gradle - static final Set OFFICIAL_PLUGINS = unmodifiableSet(newHashSet( + static final Set OFFICIAL_PLUGINS = unmodifiableSet(new LinkedHashSet<>(Arrays.asList( "analysis-icu", "analysis-kuromoji", "analysis-phonetic", @@ -117,19 +120,43 @@ class InstallPluginCommand extends CliTool.Command { "repository-azure", "repository-hdfs", "repository-s3", - "store-smb")); + "store-smb"))); - private final String pluginId; - private final boolean batch; + private final Environment env; + private final OptionSpec batchOption; + private final OptionSpec arguments; - InstallPluginCommand(Terminal terminal, String pluginId, boolean batch) { - super(terminal); - this.pluginId = pluginId; - this.batch = batch; + InstallPluginCommand(Environment env) { + super("Install a plugin"); + this.env = env; + this.batchOption = parser.acceptsAll(Arrays.asList("b", "batch"), + "Enable batch mode explicitly, automatic confirmation of security permission"); + this.arguments = parser.nonOptions("plugin id"); } @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception { + protected void printAdditionalHelp(Terminal terminal) { + terminal.println("The following official plugins may be installed by name:"); + for (String plugin : OFFICIAL_PLUGINS) { + terminal.println(" " + plugin); + } + terminal.println(""); + } + + @Override + protected void execute(Terminal terminal, OptionSet options) throws Exception { + // TODO: in jopt-simple 5.0 we can enforce a min/max number of positional args + List args = arguments.values(options); + if (args.size() != 1) { + throw new UserError(ExitCodes.USAGE, "Must supply a single plugin id argument"); + } + String pluginId = args.get(0); + boolean isBatch = options.has(batchOption) || System.console() == null; + execute(terminal, pluginId, isBatch); + } + + // pkg private for testing + void execute(Terminal terminal, String pluginId, boolean isBatch) throws Exception { // TODO: remove this leniency!! is it needed anymore? if (Files.exists(env.pluginsFile()) == false) { @@ -137,15 +164,13 @@ class InstallPluginCommand extends CliTool.Command { Files.createDirectory(env.pluginsFile()); } - Path pluginZip = download(pluginId, env.tmpFile()); + Path pluginZip = download(terminal, pluginId, env.tmpFile()); Path extractedZip = unzip(pluginZip, env.pluginsFile()); - install(extractedZip, env); - - return CliTool.ExitStatus.OK; + install(terminal, isBatch, extractedZip); } /** Downloads the plugin and returns the file it was downloaded to. 
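The rewritten InstallPluginCommand moves argument handling onto jopt-simple: a -b/--batch flag plus exactly one positional plugin id, validated manually since jopt-simple 4.x cannot enforce positional arity. A standalone sketch of just that parsing, outside the Command framework (the option names match the diff; the class name and hard-coded argv are illustrative):

    import java.util.Arrays;
    import java.util.List;

    import joptsimple.OptionParser;
    import joptsimple.OptionSet;
    import joptsimple.OptionSpec;

    public class InstallArgsSketch {
        public static void main(String[] args) throws Exception {
            OptionParser parser = new OptionParser();
            OptionSpec<Void> batchOption = parser.acceptsAll(Arrays.asList("b", "batch"),
                "Enable batch mode explicitly, automatic confirmation of security permission");
            OptionSpec<String> arguments = parser.nonOptions("plugin id");

            OptionSet options = parser.parse("--batch", "analysis-icu");

            List<String> positional = arguments.values(options);
            if (positional.size() != 1) {
                throw new IllegalArgumentException("Must supply a single plugin id argument");
            }
            // Batch mode when requested explicitly, or when there is no console to prompt on.
            boolean isBatch = options.has(batchOption) || System.console() == null;
            System.out.println("installing " + positional.get(0) + " (batch=" + isBatch + ")");
        }
    }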
*/ - private Path download(String pluginId, Path tmpDir) throws Exception { + private Path download(Terminal terminal, String pluginId, Path tmpDir) throws Exception { if (OFFICIAL_PLUGINS.contains(pluginId)) { final String version = Version.CURRENT.toString(); final String url; @@ -195,14 +220,14 @@ class InstallPluginCommand extends CliTool.Command { BufferedReader checksumReader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8)); expectedChecksum = checksumReader.readLine(); if (checksumReader.readLine() != null) { - throw new UserError(CliTool.ExitStatus.IO_ERROR, "Invalid checksum file at " + checksumUrl); + throw new UserError(ExitCodes.IO_ERROR, "Invalid checksum file at " + checksumUrl); } } byte[] zipbytes = Files.readAllBytes(zip); String gotChecksum = MessageDigests.toHexString(MessageDigests.sha1().digest(zipbytes)); if (expectedChecksum.equals(gotChecksum) == false) { - throw new UserError(CliTool.ExitStatus.IO_ERROR, "SHA1 mismatch, expected " + expectedChecksum + " but got " + gotChecksum); + throw new UserError(ExitCodes.IO_ERROR, "SHA1 mismatch, expected " + expectedChecksum + " but got " + gotChecksum); } return zip; @@ -225,7 +250,14 @@ class InstallPluginCommand extends CliTool.Command { } hasEsDir = true; Path targetFile = target.resolve(entry.getName().substring("elasticsearch/".length())); - // TODO: handle name being an absolute path + + // Using the entry name as a path can result in an entry outside of the plugin dir, either if the + // name starts with the root of the filesystem, or it is a relative entry like ../whatever. + // This check attempts to identify both cases by first normalizing the path (which removes foo/..) + // and ensuring the normalized entry is still rooted with the target plugin directory. + if (targetFile.normalize().startsWith(target) == false) { + throw new IOException("Zip contains entry name '" + entry.getName() + "' resolving outside of plugin directory"); + } // be on the safe side: do not rely on that directories are always extracted // before their children (although this makes sense, but is it guaranteed?) @@ -244,13 +276,13 @@ class InstallPluginCommand extends CliTool.Command { Files.delete(zip); if (hasEsDir == false) { IOUtils.rm(target); - throw new UserError(CliTool.ExitStatus.DATA_ERROR, "`elasticsearch` directory is missing in the plugin zip"); + throw new UserError(ExitCodes.DATA_ERROR, "`elasticsearch` directory is missing in the plugin zip"); } return target; } /** Load information about the plugin, and verify it can be installed with no errors. */ - private PluginInfo verify(Path pluginRoot, Environment env) throws Exception { + private PluginInfo verify(Terminal terminal, Path pluginRoot, boolean isBatch) throws Exception { // read and validate the plugin descriptor PluginInfo info = PluginInfo.readFromProperties(pluginRoot); terminal.println(VERBOSE, info.toString()); @@ -258,7 +290,7 @@ class InstallPluginCommand extends CliTool.Command { // don't let luser install plugin as a module... 
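The zip-entry check added in the unzip hunk above guards against "zip slip": an archive entry whose name is absolute or contains ../ segments and would otherwise be written outside the plugin directory. Normalizing the resolved path collapses any dot-dot segments, and requiring the result to stay under the extraction root rejects both escape styles. A standalone sketch of the same check (paths below are illustrative):

    import java.io.IOException;
    import java.nio.file.Path;
    import java.nio.file.Paths;

    public class ZipEntryCheckSketch {
        // Resolves an archive entry under the target directory, rejecting escapes.
        static Path resolveSafely(Path target, String entryName) throws IOException {
            Path targetFile = target.resolve(entryName);
            if (targetFile.normalize().startsWith(target) == false) {
                throw new IOException("Zip contains entry name '" + entryName
                    + "' resolving outside of plugin directory");
            }
            return targetFile;
        }

        public static void main(String[] args) throws Exception {
            Path target = Paths.get("/tmp/plugins/my-plugin").normalize();
            System.out.println(resolveSafely(target, "bin/plugin.sh")); // fine
            resolveSafely(target, "../../etc/passwd");                  // throws IOException
        }
    }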
// they might be unavoidably in maven central and are packaged up the same way) if (MODULES.contains(info.getName())) { - throw new UserError(CliTool.ExitStatus.USAGE, "plugin '" + info.getName() + "' cannot be installed like this, it is a system module"); + throw new UserError(ExitCodes.USAGE, "plugin '" + info.getName() + "' cannot be installed like this, it is a system module"); } // check for jar hell before any copying @@ -268,7 +300,7 @@ class InstallPluginCommand extends CliTool.Command { // if it exists, confirm or warn the user Path policy = pluginRoot.resolve(PluginInfo.ES_PLUGIN_POLICY); if (Files.exists(policy)) { - PluginSecurity.readPolicy(policy, terminal, env, batch); + PluginSecurity.readPolicy(policy, terminal, env, isBatch); } return info; @@ -305,16 +337,16 @@ class InstallPluginCommand extends CliTool.Command { * Installs the plugin from {@code tmpRoot} into the plugins dir. * If the plugin has a bin dir and/or a config dir, those are copied. */ - private void install(Path tmpRoot, Environment env) throws Exception { + private void install(Terminal terminal, boolean isBatch, Path tmpRoot) throws Exception { List deleteOnFailure = new ArrayList<>(); deleteOnFailure.add(tmpRoot); try { - PluginInfo info = verify(tmpRoot, env); + PluginInfo info = verify(terminal, tmpRoot, isBatch); final Path destination = env.pluginsFile().resolve(info.getName()); if (Files.exists(destination)) { - throw new UserError(CliTool.ExitStatus.USAGE, "plugin directory " + destination.toAbsolutePath() + " already exists. To update the plugin, uninstall it first using 'remove " + info.getName() + "' command"); + throw new UserError(ExitCodes.USAGE, "plugin directory " + destination.toAbsolutePath() + " already exists. To update the plugin, uninstall it first using 'remove " + info.getName() + "' command"); } Path tmpBinDir = tmpRoot.resolve("bin"); @@ -347,7 +379,7 @@ class InstallPluginCommand extends CliTool.Command { /** Copies the files from {@code tmpBinDir} into {@code destBinDir}, along with permissions from dest dirs parent. 
*/ private void installBin(PluginInfo info, Path tmpBinDir, Path destBinDir) throws Exception { if (Files.isDirectory(tmpBinDir) == false) { - throw new UserError(CliTool.ExitStatus.IO_ERROR, "bin in plugin " + info.getName() + " is not a directory"); + throw new UserError(ExitCodes.IO_ERROR, "bin in plugin " + info.getName() + " is not a directory"); } Files.createDirectory(destBinDir); @@ -365,7 +397,7 @@ class InstallPluginCommand extends CliTool.Command { try (DirectoryStream stream = Files.newDirectoryStream(tmpBinDir)) { for (Path srcFile : stream) { if (Files.isDirectory(srcFile)) { - throw new UserError(CliTool.ExitStatus.DATA_ERROR, "Directories not allowed in bin dir for plugin " + info.getName() + ", found " + srcFile.getFileName()); + throw new UserError(ExitCodes.DATA_ERROR, "Directories not allowed in bin dir for plugin " + info.getName() + ", found " + srcFile.getFileName()); } Path destFile = destBinDir.resolve(tmpBinDir.relativize(srcFile)); @@ -386,7 +418,7 @@ class InstallPluginCommand extends CliTool.Command { */ private void installConfig(PluginInfo info, Path tmpConfigDir, Path destConfigDir) throws Exception { if (Files.isDirectory(tmpConfigDir) == false) { - throw new UserError(CliTool.ExitStatus.IO_ERROR, "config in plugin " + info.getName() + " is not a directory"); + throw new UserError(ExitCodes.IO_ERROR, "config in plugin " + info.getName() + " is not a directory"); } // create the plugin's config dir "if necessary" @@ -395,7 +427,7 @@ class InstallPluginCommand extends CliTool.Command { try (DirectoryStream stream = Files.newDirectoryStream(tmpConfigDir)) { for (Path srcFile : stream) { if (Files.isDirectory(srcFile)) { - throw new UserError(CliTool.ExitStatus.DATA_ERROR, "Directories not allowed in config dir for plugin " + info.getName()); + throw new UserError(ExitCodes.DATA_ERROR, "Directories not allowed in config dir for plugin " + info.getName()); } Path destFile = destConfigDir.resolve(tmpConfigDir.relativize(srcFile)); diff --git a/core/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java b/core/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java index 6abed4e6bc2..953e698a4c2 100644 --- a/core/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java +++ b/core/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java @@ -24,22 +24,25 @@ import java.nio.file.DirectoryStream; import java.nio.file.Files; import java.nio.file.Path; -import org.elasticsearch.common.cli.CliTool; -import org.elasticsearch.common.cli.Terminal; -import org.elasticsearch.common.settings.Settings; +import joptsimple.OptionSet; +import org.elasticsearch.cli.Command; +import org.elasticsearch.cli.Terminal; import org.elasticsearch.env.Environment; /** * A command for the plugin cli to list plugins installed in elasticsearch. 
*/ -class ListPluginsCommand extends CliTool.Command { +class ListPluginsCommand extends Command { - ListPluginsCommand(Terminal terminal) { - super(terminal); + private final Environment env; + + ListPluginsCommand(Environment env) { + super("Lists installed elasticsearch plugins"); + this.env = env; } @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception { + protected void execute(Terminal terminal, OptionSet options) throws Exception { if (Files.exists(env.pluginsFile()) == false) { throw new IOException("Plugins directory missing: " + env.pluginsFile()); } @@ -50,7 +53,5 @@ class ListPluginsCommand extends CliTool.Command { terminal.println(plugin.getFileName().toString()); } } - - return CliTool.ExitStatus.OK; } } diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginCli.java b/core/src/main/java/org/elasticsearch/plugins/PluginCli.java index df402e6359d..323b872044e 100644 --- a/core/src/main/java/org/elasticsearch/plugins/PluginCli.java +++ b/core/src/main/java/org/elasticsearch/plugins/PluginCli.java @@ -19,106 +19,29 @@ package org.elasticsearch.plugins; -import org.apache.commons.cli.CommandLine; -import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.cli.CliTool; -import org.elasticsearch.common.cli.CliToolConfig; -import org.elasticsearch.common.cli.Terminal; -import org.elasticsearch.common.logging.LogConfigurator; +import org.apache.log4j.BasicConfigurator; +import org.apache.log4j.varia.NullAppender; +import org.elasticsearch.cli.MultiCommand; +import org.elasticsearch.cli.Terminal; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.node.internal.InternalSettingsPreparer; -import java.util.Locale; - -import static org.elasticsearch.common.cli.CliToolConfig.Builder.cmd; -import static org.elasticsearch.common.cli.CliToolConfig.Builder.option; - /** * A cli tool for adding, removing and listing plugins for elasticsearch. */ -public class PluginCli extends CliTool { +public class PluginCli extends MultiCommand { - // commands - private static final String LIST_CMD_NAME = "list"; - private static final String INSTALL_CMD_NAME = "install"; - private static final String REMOVE_CMD_NAME = "remove"; - - // usage config - private static final CliToolConfig.Cmd LIST_CMD = cmd(LIST_CMD_NAME, ListPluginsCommand.class).build(); - private static final CliToolConfig.Cmd INSTALL_CMD = cmd(INSTALL_CMD_NAME, InstallPluginCommand.class) - .options(option("b", "batch").required(false)) - .build(); - private static final CliToolConfig.Cmd REMOVE_CMD = cmd(REMOVE_CMD_NAME, RemovePluginCommand.class).build(); - - static final CliToolConfig CONFIG = CliToolConfig.config("plugin", PluginCli.class) - .cmds(LIST_CMD, INSTALL_CMD, REMOVE_CMD) - .build(); + public PluginCli(Environment env) { + super("A tool for managing installed elasticsearch plugins"); + subcommands.put("list", new ListPluginsCommand(env)); + subcommands.put("install", new InstallPluginCommand(env)); + subcommands.put("remove", new RemovePluginCommand(env)); + } public static void main(String[] args) throws Exception { - // initialize default for es.logger.level because we will not read the logging.yml - String loggerLevel = System.getProperty("es.logger.level", "INFO"); - // Set the appender for all potential log files to terminal so that other components that use the logger print out the - // same terminal. 
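With each command now a self-describing Command object, the PluginCli rewrite below reduces to a named-subcommand dispatcher via MultiCommand. A simplified sketch of that shape (the real MultiCommand also prints help and maps errors to exit codes; the interface and codes here are illustrative):

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class MultiCommandSketch {
        interface Command {
            int execute(String[] args) throws Exception;
        }

        final Map<String, Command> subcommands = new LinkedHashMap<>();

        int main(String[] args) throws Exception {
            if (args.length == 0 || subcommands.containsKey(args[0]) == false) {
                System.err.println("Expected one of: " + subcommands.keySet());
                return 64; // usage error, in the spirit of ExitCodes.USAGE
            }
            // Strip the subcommand name and delegate the rest of the argv.
            String[] rest = new String[args.length - 1];
            System.arraycopy(args, 1, rest, 0, rest.length);
            return subcommands.get(args[0]).execute(rest);
        }

        public static void main(String[] args) throws Exception {
            MultiCommandSketch cli = new MultiCommandSketch();
            cli.subcommands.put("list", a -> { System.out.println("listing plugins"); return 0; });
            cli.subcommands.put("remove", a -> { System.out.println("removing " + a[0]); return 0; });
            System.exit(cli.main(new String[] { "list" }));
        }
    }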
- // The reason for this is that the plugin cli cannot be configured with a file appender because when the plugin command is - // executed there is no way of knowing where the logfiles should be placed. For example, if elasticsearch - // is run as service then the logs should be at /var/log/elasticsearch but when started from the tar they should be at es.home/logs. - // Therefore we print to Terminal. - Environment env = InternalSettingsPreparer.prepareEnvironment(Settings.builder() - .put("appender.terminal.type", "terminal") - .put("rootLogger", "${es.logger.level}, terminal") - .put("es.logger.level", loggerLevel) - .build(), Terminal.DEFAULT); - // configure but do not read the logging conf file - LogConfigurator.configure(env.settings(), false); - int status = new PluginCli(Terminal.DEFAULT).execute(args).status(); - exit(status); - } - - @SuppressForbidden(reason = "Allowed to exit explicitly from #main()") - private static void exit(int status) { - System.exit(status); - } - - PluginCli(Terminal terminal) { - super(CONFIG, terminal); - } - - @Override - protected Command parse(String cmdName, CommandLine cli) throws Exception { - switch (cmdName.toLowerCase(Locale.ROOT)) { - case LIST_CMD_NAME: - return new ListPluginsCommand(terminal); - case INSTALL_CMD_NAME: - return parseInstallPluginCommand(cli); - case REMOVE_CMD_NAME: - return parseRemovePluginCommand(cli); - default: - assert false : "can't get here as cmd name is validated before this method is called"; - return exitCmd(ExitStatus.USAGE); - } - } - - private Command parseInstallPluginCommand(CommandLine cli) { - String[] args = cli.getArgs(); - if (args.length != 1) { - return exitCmd(ExitStatus.USAGE, terminal, "Must supply a single plugin id argument"); - } - - boolean batch = System.console() == null; - if (cli.hasOption("b")) { - batch = true; - } - - return new InstallPluginCommand(terminal, args[0], batch); - } - - private Command parseRemovePluginCommand(CommandLine cli) { - String[] args = cli.getArgs(); - if (args.length != 1) { - return exitCmd(ExitStatus.USAGE, terminal, "Must supply a single plugin name argument"); - } - - return new RemovePluginCommand(terminal, args[0]); + BasicConfigurator.configure(new NullAppender()); + Environment env = InternalSettingsPreparer.prepareEnvironment(Settings.EMPTY, Terminal.DEFAULT); + exit(new PluginCli(env).main(args, Terminal.DEFAULT)); } } diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginSecurity.java b/core/src/main/java/org/elasticsearch/plugins/PluginSecurity.java index b14bcaf2ff3..f9c3d1826c9 100644 --- a/core/src/main/java/org/elasticsearch/plugins/PluginSecurity.java +++ b/core/src/main/java/org/elasticsearch/plugins/PluginSecurity.java @@ -20,8 +20,8 @@ package org.elasticsearch.plugins; import org.apache.lucene.util.IOUtils; -import org.elasticsearch.common.cli.Terminal; -import org.elasticsearch.common.cli.Terminal.Verbosity; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.Terminal.Verbosity; import org.elasticsearch.env.Environment; import java.io.IOException; diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginsService.java b/core/src/main/java/org/elasticsearch/plugins/PluginsService.java index 3e36c5d8f09..cf953cd1529 100644 --- a/core/src/main/java/org/elasticsearch/plugins/PluginsService.java +++ b/core/src/main/java/org/elasticsearch/plugins/PluginsService.java @@ -37,6 +37,7 @@ import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.logging.ESLogger; import 
org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexModule; @@ -71,7 +72,8 @@ public class PluginsService extends AbstractComponent { */ private final List> plugins; private final PluginsAndModules info; - public static final Setting> MANDATORY_SETTING = Setting.listSetting("plugin.mandatory", Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER); + public static final Setting> MANDATORY_SETTING = + Setting.listSetting("plugin.mandatory", Collections.emptyList(), Function.identity(), Property.NodeScope); private final Map> onModuleReferences; diff --git a/core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java b/core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java index 8ce1056bbfd..a3e6c375f83 100644 --- a/core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java +++ b/core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java @@ -19,40 +19,55 @@ package org.elasticsearch.plugins; -import org.apache.lucene.util.IOUtils; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.cli.CliTool; -import org.elasticsearch.common.cli.Terminal; -import org.elasticsearch.common.cli.UserError; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; - import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardCopyOption; import java.util.ArrayList; import java.util.List; -import static org.elasticsearch.common.cli.Terminal.Verbosity.VERBOSE; +import joptsimple.OptionSet; +import joptsimple.OptionSpec; +import org.apache.lucene.util.IOUtils; +import org.elasticsearch.cli.Command; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.UserError; +import org.elasticsearch.common.Strings; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.env.Environment; + +import static org.elasticsearch.cli.Terminal.Verbosity.VERBOSE; /** * A command for the plugin cli to remove a plugin from elasticsearch. */ -class RemovePluginCommand extends CliTool.Command { - private final String pluginName; +class RemovePluginCommand extends Command { - public RemovePluginCommand(Terminal terminal, String pluginName) { - super(terminal); - this.pluginName = pluginName; + private final Environment env; + private final OptionSpec arguments; + + RemovePluginCommand(Environment env) { + super("Removes a plugin from elasticsearch"); + this.env = env; + this.arguments = parser.nonOptions("plugin name"); } @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception { + protected void execute(Terminal terminal, OptionSet options) throws Exception { + // TODO: in jopt-simple 5.0 we can enforce a min/max number of positional args + List args = arguments.values(options); + if (args.size() != 1) { + throw new UserError(ExitCodes.USAGE, "Must supply a single plugin id argument"); + } + execute(terminal, args.get(0)); + } + + // pkg private for testing + void execute(Terminal terminal, String pluginName) throws Exception { terminal.println("-> Removing " + Strings.coalesceToEmpty(pluginName) + "..."); Path pluginDir = env.pluginsFile().resolve(pluginName); if (Files.exists(pluginDir) == false) { - throw new UserError(CliTool.ExitStatus.USAGE, "Plugin " + pluginName + " not found. 
Run 'plugin list' to get list of installed plugins."); + throw new UserError(ExitCodes.USAGE, "Plugin " + pluginName + " not found. Run 'plugin list' to get list of installed plugins."); } List pluginPaths = new ArrayList<>(); @@ -60,7 +75,7 @@ class RemovePluginCommand extends CliTool.Command { Path pluginBinDir = env.binFile().resolve(pluginName); if (Files.exists(pluginBinDir)) { if (Files.isDirectory(pluginBinDir) == false) { - throw new UserError(CliTool.ExitStatus.IO_ERROR, "Bin dir for " + pluginName + " is not a directory"); + throw new UserError(ExitCodes.IO_ERROR, "Bin dir for " + pluginName + " is not a directory"); } pluginPaths.add(pluginBinDir); terminal.println(VERBOSE, "Removing: " + pluginBinDir); @@ -72,7 +87,5 @@ class RemovePluginCommand extends CliTool.Command { pluginPaths.add(tmpPluginDir); IOUtils.rm(pluginPaths.toArray(new Path[pluginPaths.size()])); - - return CliTool.ExitStatus.OK; } } diff --git a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index a6ea381adb4..5d423552a56 100644 --- a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -478,7 +478,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent LOCATION_SETTING = new Setting<>("location", "", Function.identity(), false, Setting.Scope.CLUSTER); - public static final Setting REPOSITORIES_LOCATION_SETTING = new Setting<>("repositories.fs.location", LOCATION_SETTING, Function.identity(), false, Setting.Scope.CLUSTER); - public static final Setting CHUNK_SIZE_SETTING = Setting.byteSizeSetting("chunk_size", "-1", false, Setting.Scope.CLUSTER); - public static final Setting REPOSITORIES_CHUNK_SIZE_SETTING = Setting.byteSizeSetting("repositories.fs.chunk_size", "-1", false, Setting.Scope.CLUSTER); - public static final Setting COMPRESS_SETTING = Setting.boolSetting("compress", false, false, Setting.Scope.CLUSTER); - public static final Setting REPOSITORIES_COMPRESS_SETTING = Setting.boolSetting("repositories.fs.compress", false, false, Setting.Scope.CLUSTER); + public static final Setting LOCATION_SETTING = + new Setting<>("location", "", Function.identity(), Property.NodeScope); + public static final Setting REPOSITORIES_LOCATION_SETTING = + new Setting<>("repositories.fs.location", LOCATION_SETTING, Function.identity(), Property.NodeScope); + public static final Setting CHUNK_SIZE_SETTING = + Setting.byteSizeSetting("chunk_size", "-1", Property.NodeScope); + public static final Setting REPOSITORIES_CHUNK_SIZE_SETTING = + Setting.byteSizeSetting("repositories.fs.chunk_size", "-1", Property.NodeScope); + public static final Setting COMPRESS_SETTING = Setting.boolSetting("compress", false, Property.NodeScope); + public static final Setting REPOSITORIES_COMPRESS_SETTING = + Setting.boolSetting("repositories.fs.compress", false, Property.NodeScope); private final FsBlobStore blobStore; diff --git a/core/src/main/java/org/elasticsearch/repositories/uri/URLRepository.java b/core/src/main/java/org/elasticsearch/repositories/uri/URLRepository.java index 2d15db245aa..77d4f1cc816 100644 --- a/core/src/main/java/org/elasticsearch/repositories/uri/URLRepository.java +++ b/core/src/main/java/org/elasticsearch/repositories/uri/URLRepository.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.blobstore.BlobStore; import 
org.elasticsearch.common.blobstore.url.URLBlobStore; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.util.URIPattern; import org.elasticsearch.env.Environment; import org.elasticsearch.index.snapshots.IndexShardRepository; @@ -55,19 +56,22 @@ public class URLRepository extends BlobStoreRepository { public final static String TYPE = "url"; - public static final Setting> SUPPORTED_PROTOCOLS_SETTING = Setting.listSetting("repositories.url.supported_protocols", - Arrays.asList("http", "https", "ftp", "file", "jar"), Function.identity(), false, Setting.Scope.CLUSTER); + public static final Setting> SUPPORTED_PROTOCOLS_SETTING = + Setting.listSetting("repositories.url.supported_protocols", Arrays.asList("http", "https", "ftp", "file", "jar"), + Function.identity(), Property.NodeScope); - public static final Setting> ALLOWED_URLS_SETTING = Setting.listSetting("repositories.url.allowed_urls", - Collections.emptyList(), URIPattern::new, false, Setting.Scope.CLUSTER); + public static final Setting> ALLOWED_URLS_SETTING = + Setting.listSetting("repositories.url.allowed_urls", Collections.emptyList(), URIPattern::new, Property.NodeScope); - public static final Setting URL_SETTING = new Setting<>("url", "http:", URLRepository::parseURL, false, Setting.Scope.CLUSTER); - public static final Setting REPOSITORIES_URL_SETTING = new Setting<>("repositories.url.url", (s) -> s.get("repositories.uri.url", "http:"), - URLRepository::parseURL, false, Setting.Scope.CLUSTER); + public static final Setting URL_SETTING = new Setting<>("url", "http:", URLRepository::parseURL, Property.NodeScope); + public static final Setting REPOSITORIES_URL_SETTING = + new Setting<>("repositories.url.url", (s) -> s.get("repositories.uri.url", "http:"), URLRepository::parseURL, + Property.NodeScope); - public static final Setting LIST_DIRECTORIES_SETTING = Setting.boolSetting("list_directories", true, false, Setting.Scope.CLUSTER); - public static final Setting REPOSITORIES_LIST_DIRECTORIES_SETTING = Setting.boolSetting("repositories.uri.list_directories", true, - false, Setting.Scope.CLUSTER); + public static final Setting LIST_DIRECTORIES_SETTING = + Setting.boolSetting("list_directories", true, Property.NodeScope); + public static final Setting REPOSITORIES_LIST_DIRECTORIES_SETTING = + Setting.boolSetting("repositories.uri.list_directories", true, Property.NodeScope); private final List supportedProtocols; diff --git a/core/src/main/java/org/elasticsearch/rest/BaseRestHandler.java b/core/src/main/java/org/elasticsearch/rest/BaseRestHandler.java index 60b3ccce930..b406dfca545 100644 --- a/core/src/main/java/org/elasticsearch/rest/BaseRestHandler.java +++ b/core/src/main/java/org/elasticsearch/rest/BaseRestHandler.java @@ -23,6 +23,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; /** @@ -34,7 +35,8 @@ import org.elasticsearch.common.settings.Settings; * {@link org.elasticsearch.rest.RestController#registerRelevantHeaders(String...)} */ public abstract class BaseRestHandler extends AbstractComponent implements RestHandler { - public static final Setting MULTI_ALLOW_EXPLICIT_INDEX = 
Setting.boolSetting("rest.action.multi.allow_explicit_index", true, false, Setting.Scope.CLUSTER); + public static final Setting MULTI_ALLOW_EXPLICIT_INDEX = + Setting.boolSetting("rest.action.multi.allow_explicit_index", true, Property.NodeScope); private final Client client; protected final ParseFieldMatcher parseFieldMatcher; diff --git a/core/src/main/java/org/elasticsearch/rest/BytesRestResponse.java b/core/src/main/java/org/elasticsearch/rest/BytesRestResponse.java index ac8eadade0b..52f624849fc 100644 --- a/core/src/main/java/org/elasticsearch/rest/BytesRestResponse.java +++ b/core/src/main/java/org/elasticsearch/rest/BytesRestResponse.java @@ -126,7 +126,11 @@ public class BytesRestResponse extends RestResponse { if (channel.request().paramAsBoolean("error_trace", !ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT)) { params = new ToXContent.DelegatingMapParams(Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE, "false"), channel.request()); } else { - SUPPRESSED_ERROR_LOGGER.info("{} Params: {}", t, channel.request().path(), channel.request().params()); + if (status.getStatus() < 500) { + SUPPRESSED_ERROR_LOGGER.debug("{} Params: {}", t, channel.request().path(), channel.request().params()); + } else { + SUPPRESSED_ERROR_LOGGER.warn("{} Params: {}", t, channel.request().path(), channel.request().params()); + } params = channel.request(); } builder.field("error"); diff --git a/core/src/main/java/org/elasticsearch/rest/RestController.java b/core/src/main/java/org/elasticsearch/rest/RestController.java index 64e21002d8c..0cbfdd0ef1b 100644 --- a/core/src/main/java/org/elasticsearch/rest/RestController.java +++ b/core/src/main/java/org/elasticsearch/rest/RestController.java @@ -176,7 +176,7 @@ public class RestController extends AbstractLifecycleComponent { try { channel.sendResponse(new BytesRestResponse(channel, e)); } catch (Throwable e1) { - logger.error("failed to send failure response for uri [" + request.uri() + "]", e1); + logger.error("failed to send failure response for uri [{}]", e1, request.uri()); } } } else { @@ -275,7 +275,7 @@ public class RestController extends AbstractLifecycleComponent { try { channel.sendResponse(new BytesRestResponse(channel, e)); } catch (IOException e1) { - logger.error("Failed to send failure response for uri [" + request.uri() + "]", e1); + logger.error("Failed to send failure response for uri [{}]", e1, request.uri()); } } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java index 759fac2eb19..7c555c9b357 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java @@ -92,14 +92,16 @@ public class RestRecoveryAction extends AbstractCatAction { .addCell("repository", "alias:rep;desc:repository") .addCell("snapshot", "alias:snap;desc:snapshot") .addCell("files", "alias:f;desc:number of files to recover") + .addCell("files_recovered", "alias:fr;desc:files recovered") .addCell("files_percent", "alias:fp;desc:percent of files recovered") - .addCell("bytes", "alias:b;desc:size to recover in bytes") + .addCell("files_total", "alias:tf;desc:total number of files") + .addCell("bytes", "alias:b;desc:number of bytes to recover") + .addCell("bytes_recovered", "alias:br;desc:bytes recovered") .addCell("bytes_percent", "alias:bp;desc:percent of bytes recovered") - .addCell("total_files", 
"alias:tf;desc:total number of files") - .addCell("total_bytes", "alias:tb;desc:total number of bytes") - .addCell("translog", "alias:tr;desc:translog operations recovered") - .addCell("translog_percent", "alias:trp;desc:percent of translog recovery") - .addCell("total_translog", "alias:trt;desc:current total translog operations") + .addCell("bytes_total", "alias:tb;desc:total number of bytes") + .addCell("translog_ops", "alias:to;desc:number of translog ops to recover") + .addCell("translog_ops_recovered", "alias:tor;desc:translog ops recovered") + .addCell("translog_ops_percent", "alias:top;desc:percent of translog ops recovered") .endHeaders(); return t; } @@ -151,14 +153,16 @@ public class RestRecoveryAction extends AbstractCatAction { t.addCell(state.getRestoreSource() == null ? "n/a" : state.getRestoreSource().snapshotId().getRepository()); t.addCell(state.getRestoreSource() == null ? "n/a" : state.getRestoreSource().snapshotId().getSnapshot()); t.addCell(state.getIndex().totalRecoverFiles()); + t.addCell(state.getIndex().recoveredFileCount()); t.addCell(String.format(Locale.ROOT, "%1.1f%%", state.getIndex().recoveredFilesPercent())); - t.addCell(state.getIndex().totalRecoverBytes()); - t.addCell(String.format(Locale.ROOT, "%1.1f%%", state.getIndex().recoveredBytesPercent())); t.addCell(state.getIndex().totalFileCount()); + t.addCell(state.getIndex().totalRecoverBytes()); + t.addCell(state.getIndex().recoveredBytes()); + t.addCell(String.format(Locale.ROOT, "%1.1f%%", state.getIndex().recoveredBytesPercent())); t.addCell(state.getIndex().totalBytes()); + t.addCell(state.getTranslog().totalOperations()); t.addCell(state.getTranslog().recoveredOperations()); t.addCell(String.format(Locale.ROOT, "%1.1f%%", state.getTranslog().recoveredPercent())); - t.addCell(state.getTranslog().totalOperations()); t.endRow(); } } diff --git a/core/src/main/java/org/elasticsearch/script/ScriptService.java b/core/src/main/java/org/elasticsearch/script/ScriptService.java index cfc402dbb04..90c617540fc 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptService.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptService.java @@ -46,6 +46,7 @@ import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; @@ -83,10 +84,13 @@ public class ScriptService extends AbstractComponent implements Closeable { static final String DISABLE_DYNAMIC_SCRIPTING_SETTING = "script.disable_dynamic"; - public static final Setting SCRIPT_CACHE_SIZE_SETTING = Setting.intSetting("script.cache.max_size", 100, 0, false, Setting.Scope.CLUSTER); - public static final Setting SCRIPT_CACHE_EXPIRE_SETTING = Setting.positiveTimeSetting("script.cache.expire", TimeValue.timeValueMillis(0), false, Setting.Scope.CLUSTER); + public static final Setting SCRIPT_CACHE_SIZE_SETTING = + Setting.intSetting("script.cache.max_size", 100, 0, Property.NodeScope); + public static final Setting SCRIPT_CACHE_EXPIRE_SETTING = + Setting.positiveTimeSetting("script.cache.expire", TimeValue.timeValueMillis(0), Property.NodeScope); public static final String SCRIPT_INDEX = ".scripts"; - public static final Setting SCRIPT_AUTO_RELOAD_ENABLED_SETTING = 
Setting.boolSetting("script.auto_reload_enabled", true, false, Setting.Scope.CLUSTER); + public static final Setting SCRIPT_AUTO_RELOAD_ENABLED_SETTING = + Setting.boolSetting("script.auto_reload_enabled", true, Property.NodeScope); private final String defaultLang; diff --git a/core/src/main/java/org/elasticsearch/script/ScriptSettings.java b/core/src/main/java/org/elasticsearch/script/ScriptSettings.java index 8ececfe25bb..1bf7fdfc843 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptSettings.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptSettings.java @@ -21,6 +21,7 @@ package org.elasticsearch.script; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import java.util.ArrayList; @@ -44,8 +45,7 @@ public class ScriptSettings { ScriptModes.sourceKey(scriptType), scriptType.getDefaultScriptMode().getMode(), ScriptMode::parse, - false, - Setting.Scope.CLUSTER)); + Property.NodeScope)); } SCRIPT_TYPE_SETTING_MAP = Collections.unmodifiableMap(scriptTypeSettingMap); } @@ -66,7 +66,7 @@ public class ScriptSettings { throw new IllegalArgumentException("unregistered default language [" + setting + "]"); } return setting; - }, false, Setting.Scope.CLUSTER); + }, Property.NodeScope); } private static Map> contextSettings(ScriptContextRegistry scriptContextRegistry) { @@ -76,8 +76,7 @@ public class ScriptSettings { ScriptModes.operationKey(scriptContext), ScriptMode.OFF.getMode(), ScriptMode::parse, - false, - Setting.Scope.CLUSTER + Property.NodeScope )); } return scriptContextSettingMap; @@ -137,8 +136,7 @@ public class ScriptSettings { ScriptModes.getKey(language, scriptType, scriptContext), defaultSetting, ScriptMode::parse, - false, - Setting.Scope.CLUSTER); + Property.NodeScope); scriptModeSettings.add(setting); } } diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java index 2e4dc150213..4c822bc68c6 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchService.java +++ b/core/src/main/java/org/elasticsearch/search/SearchService.java @@ -35,6 +35,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; @@ -112,11 +113,14 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes; public class SearchService extends AbstractLifecycleComponent implements IndexEventListener { // we can have 5 minutes here, since we make sure to clean with search requests and when shard/index closes - public static final Setting DEFAULT_KEEPALIVE_SETTING = Setting.positiveTimeSetting("search.default_keep_alive", timeValueMinutes(5), false, Setting.Scope.CLUSTER); - public static final Setting KEEPALIVE_INTERVAL_SETTING = Setting.positiveTimeSetting("search.keep_alive_interval", timeValueMinutes(1), false, Setting.Scope.CLUSTER); + public static final Setting DEFAULT_KEEPALIVE_SETTING = + Setting.positiveTimeSetting("search.default_keep_alive", timeValueMinutes(5), Property.NodeScope); + public static final Setting KEEPALIVE_INTERVAL_SETTING = + 
Setting.positiveTimeSetting("search.keep_alive_interval", timeValueMinutes(1), Property.NodeScope); public static final TimeValue NO_TIMEOUT = timeValueMillis(-1); - public static final Setting DEFAULT_SEARCH_TIMEOUT_SETTING = Setting.timeSetting("search.default_search_timeout", NO_TIMEOUT, true, Setting.Scope.CLUSTER); + public static final Setting DEFAULT_SEARCH_TIMEOUT_SETTING = + Setting.timeSetting("search.default_search_timeout", NO_TIMEOUT, Property.Dynamic, Property.NodeScope); private final ThreadPool threadPool; diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index 340a7f6ce83..949d4607b63 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -343,9 +343,9 @@ public class SnapshotShardsService extends AbstractLifecycleComponent THREADPOOL_GROUP_SETTING = Setting.groupSetting("threadpool.", true, Setting.Scope.CLUSTER); + public static final Setting THREADPOOL_GROUP_SETTING = + Setting.groupSetting("threadpool.", Property.Dynamic, Property.NodeScope); private volatile Map executors; diff --git a/core/src/main/java/org/elasticsearch/transport/Transport.java b/core/src/main/java/org/elasticsearch/transport/Transport.java index c930773f39c..532c9d99ace 100644 --- a/core/src/main/java/org/elasticsearch/transport/Transport.java +++ b/core/src/main/java/org/elasticsearch/transport/Transport.java @@ -22,6 +22,7 @@ package org.elasticsearch.transport; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; @@ -35,7 +36,7 @@ import java.util.Map; public interface Transport extends LifecycleComponent { - Setting TRANSPORT_TCP_COMPRESS = Setting.boolSetting("transport.tcp.compress", false, false, Setting.Scope.CLUSTER); + Setting TRANSPORT_TCP_COMPRESS = Setting.boolSetting("transport.tcp.compress", false, Property.NodeScope); void transportServiceAdapter(TransportServiceAdapter service); diff --git a/core/src/main/java/org/elasticsearch/transport/TransportService.java b/core/src/main/java/org/elasticsearch/transport/TransportService.java index 7884ed04af1..0faad900339 100644 --- a/core/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportService.java @@ -33,7 +33,7 @@ import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Scope; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; @@ -98,10 +98,11 @@ public class TransportService extends AbstractLifecycleComponent> TRACE_LOG_INCLUDE_SETTING = listSetting("transport.tracer.include", emptyList(), - Function.identity(), true, Scope.CLUSTER); - public static final Setting> TRACE_LOG_EXCLUDE_SETTING = listSetting("transport.tracer.exclude", - 
Arrays.asList("internal:discovery/zen/fd*", TransportLivenessAction.NAME), Function.identity(), true, Scope.CLUSTER); + public static final Setting> TRACE_LOG_INCLUDE_SETTING = + listSetting("transport.tracer.include", emptyList(), Function.identity(), Property.Dynamic, Property.NodeScope); + public static final Setting> TRACE_LOG_EXCLUDE_SETTING = + listSetting("transport.tracer.exclude", Arrays.asList("internal:discovery/zen/fd*", TransportLivenessAction.NAME), + Function.identity(), Property.Dynamic, Property.NodeScope); private final ESLogger tracerLog; @@ -380,7 +381,7 @@ public class TransportService extends AbstractLifecycleComponent> HOST = listSetting("transport.host", emptyList(), s -> s, false, Scope.CLUSTER); - public static final Setting> PUBLISH_HOST = listSetting("transport.publish_host", HOST, s -> s, false, Scope.CLUSTER); - public static final Setting> BIND_HOST = listSetting("transport.bind_host", HOST, s -> s, false, Scope.CLUSTER); - public static final Setting PORT = new Setting<>("transport.tcp.port", "9300-9400", s -> s, false, Scope.CLUSTER); - public static final Setting PUBLISH_PORT = intSetting("transport.publish_port", -1, -1, false, Scope.CLUSTER); + public static final Setting> HOST = + listSetting("transport.host", emptyList(), Function.identity(), Property.NodeScope); + public static final Setting> PUBLISH_HOST = + listSetting("transport.publish_host", HOST, Function.identity(), Property.NodeScope); + public static final Setting> BIND_HOST = + listSetting("transport.bind_host", HOST, Function.identity(), Property.NodeScope); + public static final Setting PORT = + new Setting<>("transport.tcp.port", "9300-9400", Function.identity(), Property.NodeScope); + public static final Setting PUBLISH_PORT = + intSetting("transport.publish_port", -1, -1, Property.NodeScope); public static final String DEFAULT_PROFILE = "default"; - public static final Setting TRANSPORT_PROFILES_SETTING = groupSetting("transport.profiles.", true, Scope.CLUSTER); + public static final Setting TRANSPORT_PROFILES_SETTING = + groupSetting("transport.profiles.", Property.Dynamic, Property.NodeScope); private TransportSettings() { diff --git a/core/src/main/java/org/elasticsearch/transport/local/LocalTransport.java b/core/src/main/java/org/elasticsearch/transport/local/LocalTransport.java index faef71998a9..a7783148ef3 100644 --- a/core/src/main/java/org/elasticsearch/transport/local/LocalTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/local/LocalTransport.java @@ -272,7 +272,7 @@ public class LocalTransport extends AbstractLifecycleComponent implem handleException(handler, new RemoteTransportException(nodeName(), localAddress, action, e)); } } else { - logger.warn("Failed to receive message for action [" + action + "]", e); + logger.warn("Failed to receive message for action [{}]", e, action); } } } @@ -314,7 +314,7 @@ public class LocalTransport extends AbstractLifecycleComponent implem try { transportChannel.sendResponse(e); } catch (Throwable e1) { - logger.warn("Failed to send error message back to client for action [" + action + "]", e1); + logger.warn("Failed to send error message back to client for action [{}]", e1, action); logger.warn("Actual Exception", e); } } @@ -325,7 +325,7 @@ public class LocalTransport extends AbstractLifecycleComponent implem try { transportChannel.sendResponse(e); } catch (Throwable e1) { - logger.warn("Failed to send error message back to client for action [" + action + "]", e); + logger.warn("Failed to send error message back to client 
for action [{}]", e, action); logger.warn("Actual Exception", e1); } diff --git a/core/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java b/core/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java index e4dbbfa73af..302f8296ad3 100644 --- a/core/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java @@ -274,7 +274,7 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler { try { transportChannel.sendResponse(e); } catch (IOException e1) { - logger.warn("Failed to send error message back to client for action [" + action + "]", e); + logger.warn("Failed to send error message back to client for action [{}]", e, action); logger.warn("Actual Exception", e1); } } @@ -336,7 +336,7 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler { try { transportChannel.sendResponse(e); } catch (Throwable e1) { - logger.warn("Failed to send error message back to client for action [" + reg.getAction() + "]", e1); + logger.warn("Failed to send error message back to client for action [{}]", e1, reg.getAction()); logger.warn("Actual Exception", e); } } diff --git a/core/src/main/java/org/elasticsearch/transport/netty/NettyInternalESLogger.java b/core/src/main/java/org/elasticsearch/transport/netty/NettyInternalESLogger.java index ed92aa261db..2a1fc3226a4 100644 --- a/core/src/main/java/org/elasticsearch/transport/netty/NettyInternalESLogger.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/NettyInternalESLogger.java @@ -19,12 +19,14 @@ package org.elasticsearch.transport.netty; +import org.elasticsearch.common.SuppressLoggerChecks; import org.elasticsearch.common.logging.ESLogger; import org.jboss.netty.logging.AbstractInternalLogger; /** * */ +@SuppressLoggerChecks(reason = "safely delegates to logger") public class NettyInternalESLogger extends AbstractInternalLogger { private final ESLogger logger; diff --git a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java index 27ba643ef71..da629a0d47f 100644 --- a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java @@ -45,7 +45,7 @@ import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.network.NetworkService.TcpSettings; import org.elasticsearch.common.network.NetworkUtils; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Scope; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.InetSocketTransportAddress; @@ -150,40 +150,45 @@ public class NettyTransport extends AbstractLifecycleComponent implem public static final String TRANSPORT_CLIENT_WORKER_THREAD_NAME_PREFIX = "transport_client_worker"; public static final String TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX = "transport_client_boss"; - public static final Setting WORKER_COUNT = new Setting<>("transport.netty.worker_count", + public static final Setting WORKER_COUNT = + new Setting<>("transport.netty.worker_count", (s) -> Integer.toString(EsExecutors.boundedNumberOfProcessors(s) * 2), - (s) -> Setting.parseInt(s, 1, "transport.netty.worker_count"), false, 
Setting.Scope.CLUSTER); - public static final Setting CONNECTIONS_PER_NODE_RECOVERY = intSetting("transport.connections_per_node.recovery", 2, 1, false, - Scope.CLUSTER); - public static final Setting CONNECTIONS_PER_NODE_BULK = intSetting("transport.connections_per_node.bulk", 3, 1, false, - Scope.CLUSTER); - public static final Setting CONNECTIONS_PER_NODE_REG = intSetting("transport.connections_per_node.reg", 6, 1, false, - Scope.CLUSTER); - public static final Setting CONNECTIONS_PER_NODE_STATE = intSetting("transport.connections_per_node.state", 1, 1, false, - Scope.CLUSTER); - public static final Setting CONNECTIONS_PER_NODE_PING = intSetting("transport.connections_per_node.ping", 1, 1, false, - Scope.CLUSTER); + (s) -> Setting.parseInt(s, 1, "transport.netty.worker_count"), Property.NodeScope); + public static final Setting CONNECTIONS_PER_NODE_RECOVERY = + intSetting("transport.connections_per_node.recovery", 2, 1, Property.NodeScope); + public static final Setting CONNECTIONS_PER_NODE_BULK = + intSetting("transport.connections_per_node.bulk", 3, 1, Property.NodeScope); + public static final Setting CONNECTIONS_PER_NODE_REG = + intSetting("transport.connections_per_node.reg", 6, 1, Property.NodeScope); + public static final Setting CONNECTIONS_PER_NODE_STATE = + intSetting("transport.connections_per_node.state", 1, 1, Property.NodeScope); + public static final Setting CONNECTIONS_PER_NODE_PING = + intSetting("transport.connections_per_node.ping", 1, 1, Property.NodeScope); // the scheduled internal ping interval setting, defaults to disabled (-1) - public static final Setting PING_SCHEDULE = timeSetting("transport.ping_schedule", TimeValue.timeValueSeconds(-1), false, - Setting.Scope.CLUSTER); - public static final Setting TCP_BLOCKING_CLIENT = boolSetting("transport.tcp.blocking_client", TcpSettings.TCP_BLOCKING_CLIENT, - false, Setting.Scope.CLUSTER); - public static final Setting TCP_CONNECT_TIMEOUT = timeSetting("transport.tcp.connect_timeout", - TcpSettings.TCP_CONNECT_TIMEOUT, false, Setting.Scope.CLUSTER); - public static final Setting TCP_NO_DELAY = boolSetting("transport.tcp_no_delay", TcpSettings.TCP_NO_DELAY, false, - Setting.Scope.CLUSTER); - public static final Setting TCP_KEEP_ALIVE = boolSetting("transport.tcp.keep_alive", TcpSettings.TCP_KEEP_ALIVE, false, - Setting.Scope.CLUSTER); - public static final Setting TCP_BLOCKING_SERVER = boolSetting("transport.tcp.blocking_server", TcpSettings.TCP_BLOCKING_SERVER, - false, Setting.Scope.CLUSTER); - public static final Setting TCP_REUSE_ADDRESS = boolSetting("transport.tcp.reuse_address", TcpSettings.TCP_REUSE_ADDRESS, - false, Setting.Scope.CLUSTER); + public static final Setting PING_SCHEDULE = + timeSetting("transport.ping_schedule", TimeValue.timeValueSeconds(-1), Property.NodeScope); + public static final Setting TCP_BLOCKING_CLIENT = + boolSetting("transport.tcp.blocking_client", TcpSettings.TCP_BLOCKING_CLIENT, Property.NodeScope); + public static final Setting TCP_CONNECT_TIMEOUT = + timeSetting("transport.tcp.connect_timeout", TcpSettings.TCP_CONNECT_TIMEOUT, Property.NodeScope); + public static final Setting TCP_NO_DELAY = + boolSetting("transport.tcp_no_delay", TcpSettings.TCP_NO_DELAY, Property.NodeScope); + public static final Setting TCP_KEEP_ALIVE = + boolSetting("transport.tcp.keep_alive", TcpSettings.TCP_KEEP_ALIVE, Property.NodeScope); + public static final Setting TCP_BLOCKING_SERVER = + boolSetting("transport.tcp.blocking_server", TcpSettings.TCP_BLOCKING_SERVER, Property.NodeScope); + public static 
final Setting TCP_REUSE_ADDRESS = + boolSetting("transport.tcp.reuse_address", TcpSettings.TCP_REUSE_ADDRESS, Property.NodeScope); - public static final Setting TCP_SEND_BUFFER_SIZE = Setting.byteSizeSetting("transport.tcp.send_buffer_size", TcpSettings.TCP_SEND_BUFFER_SIZE, false, Setting.Scope.CLUSTER); - public static final Setting TCP_RECEIVE_BUFFER_SIZE = Setting.byteSizeSetting("transport.tcp.receive_buffer_size", TcpSettings.TCP_RECEIVE_BUFFER_SIZE, false, Setting.Scope.CLUSTER); + public static final Setting TCP_SEND_BUFFER_SIZE = + Setting.byteSizeSetting("transport.tcp.send_buffer_size", TcpSettings.TCP_SEND_BUFFER_SIZE, Property.NodeScope); + public static final Setting TCP_RECEIVE_BUFFER_SIZE = + Setting.byteSizeSetting("transport.tcp.receive_buffer_size", TcpSettings.TCP_RECEIVE_BUFFER_SIZE, Property.NodeScope); - public static final Setting NETTY_MAX_CUMULATION_BUFFER_CAPACITY = Setting.byteSizeSetting("transport.netty.max_cumulation_buffer_capacity", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER); - public static final Setting NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = Setting.intSetting("transport.netty.max_composite_buffer_components", -1, -1, false, Setting.Scope.CLUSTER); + public static final Setting NETTY_MAX_CUMULATION_BUFFER_CAPACITY = + Setting.byteSizeSetting("transport.netty.max_cumulation_buffer_capacity", new ByteSizeValue(-1), Property.NodeScope); + public static final Setting NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = + Setting.intSetting("transport.netty.max_composite_buffer_components", -1, -1, Property.NodeScope); // See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for default values in netty..., we can use higher ones for us, even fixed one public static final Setting NETTY_RECEIVE_PREDICTOR_SIZE = Setting.byteSizeSetting( @@ -196,12 +201,13 @@ public class NettyTransport extends AbstractLifecycleComponent implem defaultReceiverPredictor = Math.min(defaultReceiverPredictor, Math.max(l, 64 * 1024)); } return new ByteSizeValue(defaultReceiverPredictor).toString(); - }, false, Setting.Scope.CLUSTER); - public static final Setting NETTY_RECEIVE_PREDICTOR_MIN = byteSizeSetting("transport.netty.receive_predictor_min", - NETTY_RECEIVE_PREDICTOR_SIZE, false, Scope.CLUSTER); - public static final Setting NETTY_RECEIVE_PREDICTOR_MAX = byteSizeSetting("transport.netty.receive_predictor_max", - NETTY_RECEIVE_PREDICTOR_SIZE, false, Scope.CLUSTER); - public static final Setting NETTY_BOSS_COUNT = intSetting("transport.netty.boss_count", 1, 1, false, Scope.CLUSTER); + }, Property.NodeScope); + public static final Setting NETTY_RECEIVE_PREDICTOR_MIN = + byteSizeSetting("transport.netty.receive_predictor_min", NETTY_RECEIVE_PREDICTOR_SIZE, Property.NodeScope); + public static final Setting NETTY_RECEIVE_PREDICTOR_MAX = + byteSizeSetting("transport.netty.receive_predictor_max", NETTY_RECEIVE_PREDICTOR_SIZE, Property.NodeScope); + public static final Setting NETTY_BOSS_COUNT = + intSetting("transport.netty.boss_count", 1, 1, Property.NodeScope); protected final NetworkService networkService; protected final Version version; @@ -1379,9 +1385,9 @@ public class NettyTransport extends AbstractLifecycleComponent implem @Override public void onFailure(Throwable t) { if (lifecycle.stoppedOrClosed()) { - logger.trace("[{}] failed to send ping transport message", t); + logger.trace("failed to send ping transport message", t); } else { - logger.warn("[{}] failed to send ping transport message", t); + logger.warn("failed to send ping transport message", t); } } } diff --git 
a/core/src/main/java/org/elasticsearch/tribe/TribeService.java b/core/src/main/java/org/elasticsearch/tribe/TribeService.java index 2bd40539807..bf7983e3c9d 100644 --- a/core/src/main/java/org/elasticsearch/tribe/TribeService.java +++ b/core/src/main/java/org/elasticsearch/tribe/TribeService.java @@ -45,6 +45,7 @@ import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.set.Sets; @@ -124,7 +125,7 @@ public class TribeService extends AbstractLifecycleComponent { } // internal settings only - public static final Setting TRIBE_NAME_SETTING = Setting.simpleString("tribe.name", false, Setting.Scope.CLUSTER); + public static final Setting TRIBE_NAME_SETTING = Setting.simpleString("tribe.name", Property.NodeScope); private final ClusterService clusterService; private final String[] blockIndicesWrite; private final String[] blockIndicesRead; @@ -143,18 +144,18 @@ public class TribeService extends AbstractLifecycleComponent { throw new IllegalArgumentException( "Invalid value for [tribe.on_conflict] must be either [any, drop or start with prefer_] but was: [" + s + "]"); } - }, false, Setting.Scope.CLUSTER); + }, Property.NodeScope); - public static final Setting BLOCKS_METADATA_SETTING = Setting.boolSetting("tribe.blocks.metadata", false, false, - Setting.Scope.CLUSTER); - public static final Setting BLOCKS_WRITE_SETTING = Setting.boolSetting("tribe.blocks.write", false, false, - Setting.Scope.CLUSTER); - public static final Setting> BLOCKS_WRITE_INDICES_SETTING = Setting.listSetting("tribe.blocks.write.indices", - Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER); - public static final Setting> BLOCKS_READ_INDICES_SETTING = Setting.listSetting("tribe.blocks.read.indices", - Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER); - public static final Setting> BLOCKS_METADATA_INDICES_SETTING = Setting.listSetting("tribe.blocks.metadata.indices", - Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER); + public static final Setting BLOCKS_METADATA_SETTING = + Setting.boolSetting("tribe.blocks.metadata", false, Property.NodeScope); + public static final Setting BLOCKS_WRITE_SETTING = + Setting.boolSetting("tribe.blocks.write", false, Property.NodeScope); + public static final Setting> BLOCKS_WRITE_INDICES_SETTING = + Setting.listSetting("tribe.blocks.write.indices", Collections.emptyList(), Function.identity(), Property.NodeScope); + public static final Setting> BLOCKS_READ_INDICES_SETTING = + Setting.listSetting("tribe.blocks.read.indices", Collections.emptyList(), Function.identity(), Property.NodeScope); + public static final Setting> BLOCKS_METADATA_INDICES_SETTING = + Setting.listSetting("tribe.blocks.metadata.indices", Collections.emptyList(), Function.identity(), Property.NodeScope); public static final Set TRIBE_SETTING_KEYS = Sets.newHashSet(TRIBE_NAME_SETTING.getKey(), ON_CONFLICT_SETTING.getKey(), BLOCKS_METADATA_INDICES_SETTING.getKey(), BLOCKS_METADATA_SETTING.getKey(), BLOCKS_READ_INDICES_SETTING.getKey(), BLOCKS_WRITE_INDICES_SETTING.getKey(), BLOCKS_WRITE_SETTING.getKey()); @@ -262,7 +263,7 @@ public class TribeService extends AbstractLifecycleComponent { 
try { otherNode.close(); } catch (Throwable t) { - logger.warn("failed to close node {} on failed start", otherNode, t); + logger.warn("failed to close node {} on failed start", t, otherNode); } } if (e instanceof RuntimeException) { diff --git a/core/src/main/resources/org/elasticsearch/bootstrap/elasticsearch-start.help b/core/src/main/resources/org/elasticsearch/bootstrap/elasticsearch-start.help deleted file mode 100644 index 9b27a8dd390..00000000000 --- a/core/src/main/resources/org/elasticsearch/bootstrap/elasticsearch-start.help +++ /dev/null @@ -1,28 +0,0 @@ -NAME - - start - Start Elasticsearch - -SYNOPSIS - - elasticsearch start - -DESCRIPTION - - This command starts Elasticsearch. You can configure it to run in the foreground, write a pid file - and configure arbitrary options that override file-based configuration. - -OPTIONS - - -h,--help Shows this message - - -p,--pidfile Creates a pid file in the specified path on start - - -d,--daemonize Starts Elasticsearch in the background - - -Dproperty=value Configures an Elasticsearch specific property, like -Dnetwork.host=127.0.0.1 - - --property=value Configures an elasticsearch specific property, like --network.host 127.0.0.1 - --property value - - NOTE: The -d, -p, and -D arguments must appear before any --property arguments. - diff --git a/core/src/main/resources/org/elasticsearch/bootstrap/elasticsearch-version.help b/core/src/main/resources/org/elasticsearch/bootstrap/elasticsearch-version.help deleted file mode 100644 index 00f2a33401c..00000000000 --- a/core/src/main/resources/org/elasticsearch/bootstrap/elasticsearch-version.help +++ /dev/null @@ -1,16 +0,0 @@ -NAME - - version - Show version information and exit - -SYNOPSIS - - elasticsearch version - -DESCRIPTION - - This command shows Elasticsearch version, timestamp and build information as well as JVM info - -OPTIONS - - -h,--help Shows this message - diff --git a/core/src/main/resources/org/elasticsearch/bootstrap/elasticsearch.help b/core/src/main/resources/org/elasticsearch/bootstrap/elasticsearch.help deleted file mode 100644 index 83ee497dc21..00000000000 --- a/core/src/main/resources/org/elasticsearch/bootstrap/elasticsearch.help +++ /dev/null @@ -1,22 +0,0 @@ -NAME - - elasticsearch - Manages elasticsearch - -SYNOPSIS - - elasticsearch - -DESCRIPTION - - Start an elasticsearch node - -COMMANDS - - start Start elasticsearch - - version Show version information and exit - -NOTES - - [*] For usage help on specific commands please type "elasticsearch -h" - diff --git a/core/src/main/resources/org/elasticsearch/bootstrap/security.policy b/core/src/main/resources/org/elasticsearch/bootstrap/security.policy index 4909959015b..3e8bdbb0ad4 100644 --- a/core/src/main/resources/org/elasticsearch/bootstrap/security.policy +++ b/core/src/main/resources/org/elasticsearch/bootstrap/security.policy @@ -31,7 +31,7 @@ grant codeBase "${codebase.securesm-1.0.jar}" { //// Very special jar permissions: //// These are dangerous permissions that we don't want to grant to everything. 
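// Across these hunks the trailing (boolean dynamic, Setting.Scope.CLUSTER) arguments are
// replaced by Setting.Property varargs. A before/after sketch, assuming only the
// Setting.boolSetting factory exactly as used in the surrounding hunks; ExampleSettings
// and the "example.*" keys are hypothetical.
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;

class ExampleSettings {
    // before: Setting.boolSetting("example.enabled", false, false, Setting.Scope.CLUSTER)
    // after: node-scoped, non-dynamic
    static final Setting<Boolean> ENABLED =
        Setting.boolSetting("example.enabled", false, Property.NodeScope);

    // before: Setting.boolSetting("example.verbose", true, true, Setting.Scope.CLUSTER)
    // after: the old `dynamic == true` flag becomes an explicit Property.Dynamic
    static final Setting<Boolean> VERBOSE =
        Setting.boolSetting("example.verbose", true, Property.Dynamic, Property.NodeScope);
}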
-grant codeBase "${codebase.lucene-core-6.0.0-snapshot-bea235f.jar}" { +grant codeBase "${codebase.lucene-core-6.0.0-snapshot-f0aa4fc.jar}" { // needed to allow MMapDirectory's "unmap hack" (die unmap hack, die) // java 8 package permission java.lang.RuntimePermission "accessClassInPackage.sun.misc"; diff --git a/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy b/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy index fafa57118c2..8d56bc44b9a 100644 --- a/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy +++ b/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy @@ -31,7 +31,7 @@ grant codeBase "${codebase.securemock-1.2.jar}" { permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; }; -grant codeBase "${codebase.lucene-test-framework-6.0.0-snapshot-bea235f.jar}" { +grant codeBase "${codebase.lucene-test-framework-6.0.0-snapshot-f0aa4fc.jar}" { // needed by RamUsageTester permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; }; diff --git a/core/src/main/resources/org/elasticsearch/plugins/plugin-install.help b/core/src/main/resources/org/elasticsearch/plugins/plugin-install.help deleted file mode 100644 index ba39e1ab8fb..00000000000 --- a/core/src/main/resources/org/elasticsearch/plugins/plugin-install.help +++ /dev/null @@ -1,59 +0,0 @@ -NAME - - install - Install a plugin - -SYNOPSIS - - plugin install - -DESCRIPTION - - This command installs an elasticsearch plugin. It can be used as follows: - - Officially supported or commercial plugins require just the plugin name: - - plugin install analysis-icu - plugin install x-pack - - Plugins from Maven Central require 'groupId:artifactId:version': - - plugin install org.elasticsearch:mapper-attachments:3.0.0 - - Plugins can be installed from a custom URL or file location as follows: - - plugin install http://some.domain.name//my-plugin-1.0.0.zip - plugin install file:/path/to/my-plugin-1.0.0.zip - -OFFICIAL PLUGINS - - The following plugins are officially supported and can be installed by just referring to their name - - - analysis-icu - - analysis-kuromoji - - analysis-phonetic - - analysis-smartcn - - analysis-stempel - - delete-by-query - - discovery-azure - - discovery-ec2 - - discovery-gce - - ingest-geoip - - lang-javascript - - lang-painless - - lang-python - - mapper-attachments - - mapper-murmur3 - - mapper-size - - repository-azure - - repository-hdfs - - repository-s3 - - store-smb - - -OPTIONS - - -v,--verbose Verbose output - - -h,--help Shows this message - - -b,--batch Enable batch mode explicitly, automatic confirmation of security permissions diff --git a/core/src/main/resources/org/elasticsearch/plugins/plugin-list.help b/core/src/main/resources/org/elasticsearch/plugins/plugin-list.help deleted file mode 100644 index c13949e8cb6..00000000000 --- a/core/src/main/resources/org/elasticsearch/plugins/plugin-list.help +++ /dev/null @@ -1,12 +0,0 @@ -NAME - - list - List all plugins - -SYNOPSIS - - plugin list - -DESCRIPTION - - This command lists all installed elasticsearch plugins - diff --git a/core/src/main/resources/org/elasticsearch/plugins/plugin-remove.help b/core/src/main/resources/org/elasticsearch/plugins/plugin-remove.help deleted file mode 100644 index b708adf1f69..00000000000 --- a/core/src/main/resources/org/elasticsearch/plugins/plugin-remove.help +++ /dev/null @@ -1,12 +0,0 @@ -NAME - - remove - Remove a plugin - -SYNOPSIS - - plugin remove - -DESCRIPTION - - This command removes an 
elasticsearch plugin - diff --git a/core/src/main/resources/org/elasticsearch/plugins/plugin.help b/core/src/main/resources/org/elasticsearch/plugins/plugin.help deleted file mode 100644 index 5cba544627a..00000000000 --- a/core/src/main/resources/org/elasticsearch/plugins/plugin.help +++ /dev/null @@ -1,24 +0,0 @@ -NAME - - plugin - Manages plugins - -SYNOPSIS - - plugin - -DESCRIPTION - - Manage plugins - -COMMANDS - - install Install a plugin - - remove Remove a plugin - - list List installed plugins - -NOTES - - [*] For usage help on specific commands please type "plugin -h" - diff --git a/core/src/test/java/org/elasticsearch/VersionTests.java b/core/src/test/java/org/elasticsearch/VersionTests.java index 7824ecd39b1..eec912989a7 100644 --- a/core/src/test/java/org/elasticsearch/VersionTests.java +++ b/core/src/test/java/org/elasticsearch/VersionTests.java @@ -231,7 +231,7 @@ public class VersionTests extends ESTestCase { assertTrue(constantName + " should be final", Modifier.isFinal(versionConstant.getModifiers())); Version v = (Version) versionConstant.get(Version.class); - logger.info("Checking " + v); + logger.info("Checking {}", v); assertEquals("Version id " + field.getName() + " does not point to " + constantName, v, Version.fromId(versionId)); assertEquals("Version " + constantName + " does not have correct id", versionId, v.id); if (v.major >= 2) { diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java index 556eee238fd..2fe79d25ebb 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java @@ -294,14 +294,14 @@ public class TransportTasksActionTests extends TaskManagerTestCase { actions[i] = new TestNodesAction(Settings.EMPTY, "testAction", clusterName, threadPool, testNodes[i].clusterService, testNodes[i].transportService) { @Override protected NodeResponse nodeOperation(NodeRequest request) { - logger.info("Action on node " + node); + logger.info("Action on node {}", node); actionLatch.countDown(); try { checkLatch.await(); } catch (InterruptedException ex) { Thread.currentThread().interrupt(); } - logger.info("Action on node " + node + " finished"); + logger.info("Action on node {} finished", node); return new NodeResponse(testNodes[node].discoveryNode); } }; @@ -565,7 +565,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase { actions[i] = new TestNodesAction(Settings.EMPTY, "testAction", clusterName, threadPool, testNodes[i].clusterService, testNodes[i].transportService) { @Override protected NodeResponse nodeOperation(NodeRequest request) { - logger.info("Action on node " + node); + logger.info("Action on node {}", node); throw new RuntimeException("Test exception"); } }; @@ -604,9 +604,9 @@ public class TransportTasksActionTests extends TaskManagerTestCase { tasksActions[i] = new TestTasksAction(Settings.EMPTY, "testTasksAction", clusterName, threadPool, testNodes[i].clusterService, testNodes[i].transportService) { @Override protected TestTaskResponse taskOperation(TestTasksRequest request, Task task) { - logger.info("Task action on node " + node); + logger.info("Task action on node {}", node); if (failTaskOnNode == node && task.getParentTaskId().isSet() == false) { - logger.info("Failing on node " + node); + logger.info("Failing on 
node {}", node); throw new RuntimeException("Task level failure"); } return new TestTaskResponse("Success on node " + node); diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java index 9c554da781a..503db65e810 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java @@ -34,7 +34,7 @@ import static org.hamcrest.Matchers.hasSize; /** * This class tests that repository operations (Put, Delete, Verify) are blocked when the cluster is read-only. * - * The @ClusterScope TEST is needed because this class updates the cluster setting "cluster.blocks.read_only". + * The @NodeScope TEST is needed because this class updates the cluster setting "cluster.blocks.read_only". */ @ClusterScope(scope = ESIntegTestCase.Scope.TEST) public class RepositoryBlocksIT extends ESIntegTestCase { diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java index f3a23be919d..82a2637d76b 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java @@ -41,7 +41,7 @@ import static org.hamcrest.Matchers.hasSize; /** * This class tests that snapshot operations (Create, Delete, Restore) are blocked when the cluster is read-only. * - * The @ClusterScope TEST is needed because this class updates the cluster setting "cluster.blocks.read_only". + * The @NodeScope TEST is needed because this class updates the cluster setting "cluster.blocks.read_only". 
*/ @ClusterScope(scope = ESIntegTestCase.Scope.TEST) public class SnapshotBlocksIT extends ESIntegTestCase { diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/upgrade/UpgradeIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/upgrade/UpgradeIT.java index baca9508a8b..620cef31f9a 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/upgrade/UpgradeIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/upgrade/UpgradeIT.java @@ -134,7 +134,7 @@ public class UpgradeIT extends ESBackcompatTestCase { // means we can never generate ancient segments in this test (unless Lucene major version bumps but ES major version does not): assertFalse(hasAncientSegments(client(), indexToUpgrade)); - logger.info("--> Running upgrade on index " + indexToUpgrade); + logger.info("--> Running upgrade on index {}", indexToUpgrade); assertNoFailures(client().admin().indices().prepareUpgrade(indexToUpgrade).get()); awaitBusy(() -> { try { @@ -228,7 +228,7 @@ public class UpgradeIT extends ESBackcompatTestCase { ESLogger logger = Loggers.getLogger(UpgradeIT.class); int toUpgrade = 0; for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) { - logger.info("Index: " + status.getIndex() + ", total: " + status.getTotalBytes() + ", toUpgrade: " + status.getToUpgradeBytes()); + logger.info("Index: {}, total: {}, toUpgrade: {}", status.getIndex(), status.getTotalBytes(), status.getToUpgradeBytes()); toUpgrade += status.getToUpgradeBytes(); } return toUpgrade == 0; diff --git a/core/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityIT.java index 7e46825398b..ae739701593 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityIT.java @@ -191,7 +191,7 @@ public class BasicBackwardsCompatibilityIT extends ESBackcompatTestCase { numDocs *= 2; } - logger.info(" --> waiting for relocation to complete", numDocs); + logger.info(" --> waiting for relocation of [{}] docs to complete", numDocs); ensureYellow("test");// move all shards to the new node (it waits on relocation) final int numIters = randomIntBetween(10, 20); for (int i = 0; i < numIters; i++) { diff --git a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java index 8e3dbd5f563..1b0988f21ba 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java @@ -162,7 +162,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { singleDataPath = nodePaths[0].resolve(NodeEnvironment.INDICES_FOLDER); assertFalse(Files.exists(singleDataPath)); Files.createDirectories(singleDataPath); - logger.info("--> Single data path: " + singleDataPath.toString()); + logger.info("--> Single data path: {}", singleDataPath); // find multi data path dirs nodePaths = internalCluster().getInstance(NodeEnvironment.class, multiDataPathNode.get()).nodeDataPaths(); @@ -173,7 +173,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { assertFalse(Files.exists(multiDataPath[1])); Files.createDirectories(multiDataPath[0]); Files.createDirectories(multiDataPath[1]); - logger.info("--> Multi data paths: " + multiDataPath[0].toString() + ", 
" + multiDataPath[1].toString()); + logger.info("--> Multi data paths: {}, {}", multiDataPath[0], multiDataPath[1]); replicas.get(); // wait for replicas } @@ -239,13 +239,13 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { if (file.getFileName().toString().equals(IndexWriter.WRITE_LOCK_NAME)) { // skip lock file, we don't need it - logger.trace("Skipping lock file: " + file.toString()); + logger.trace("Skipping lock file: {}", file); return FileVisitResult.CONTINUE; } Path relativeFile = src.relativize(file); Path destFile = destinationDataPath.resolve(indexName).resolve(relativeFile); - logger.trace("--> Moving " + relativeFile.toString() + " to " + destFile.toString()); + logger.trace("--> Moving {} to {}", relativeFile, destFile); Files.move(file, destFile); assertFalse(Files.exists(file)); assertTrue(Files.exists(destFile)); @@ -269,7 +269,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { for (String index : indexes) { if (expectedVersions.remove(index) == false) { - logger.warn("Old indexes tests contain extra index: " + index); + logger.warn("Old indexes tests contain extra index: {}", index); } } if (expectedVersions.isEmpty() == false) { @@ -287,9 +287,9 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { Collections.shuffle(indexes, random()); for (String index : indexes) { long startTime = System.currentTimeMillis(); - logger.info("--> Testing old index " + index); + logger.info("--> Testing old index {}", index); assertOldIndexWorks(index); - logger.info("--> Done testing " + index + ", took " + ((System.currentTimeMillis() - startTime) / 1000.0) + " seconds"); + logger.info("--> Done testing {}, took {} seconds", index, (System.currentTimeMillis() - startTime) / 1000.0); } } @@ -344,7 +344,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { SearchResponse searchRsp = searchReq.get(); ElasticsearchAssertions.assertNoFailures(searchRsp); long numDocs = searchRsp.getHits().getTotalHits(); - logger.info("Found " + numDocs + " in old index"); + logger.info("Found {} in old index", numDocs); logger.info("--> testing basic search with sort"); searchReq.addSort("long_sort", SortOrder.ASC); @@ -523,7 +523,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { for (String indexFile : indexes) { String indexName = indexFile.replace(".zip", "").toLowerCase(Locale.ROOT).replace("unsupported-", "index-"); Path nodeDir = getNodeDir(indexFile); - logger.info("Parsing cluster state files from index [" + indexName + "]"); + logger.info("Parsing cluster state files from index [{}]", indexName); assertNotNull(globalFormat.loadLatestState(logger, nodeDir)); // no exception Path indexDir = nodeDir.resolve("indices").resolve(indexName); assertNotNull(indexFormat.loadLatestState(logger, indexDir)); // no exception diff --git a/core/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java b/core/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java index 23163b86112..9fe83f65c45 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java @@ -28,7 +28,7 @@ public class RecoveryWithUnsupportedIndicesIT extends StaticIndexBackwardCompati public void testUpgradeStartClusterOn_0_20_6() throws Exception { String indexName = 
"unsupported-0.20.6"; - logger.info("Checking static index " + indexName); + logger.info("Checking static index {}", indexName); Settings nodeSettings = prepareBackwardsDataDir(getBwcIndicesPath().resolve(indexName + ".zip"), NetworkModule.HTTP_ENABLED.getKey(), true); try { internalCluster().startNode(nodeSettings); diff --git a/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java b/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java index 483040209d0..5b81621e6dd 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java @@ -108,7 +108,7 @@ public class RestoreBackwardsCompatIT extends AbstractSnapshotIntegTestCase { for (String repoVersion : repoVersions) { if (expectedVersions.remove(repoVersion) == false) { - logger.warn("Old repositories tests contain extra repo: " + repoVersion); + logger.warn("Old repositories tests contain extra repo: {}", repoVersion); } } if (expectedVersions.isEmpty() == false) { diff --git a/core/src/test/java/org/elasticsearch/bwcompat/StaticIndexBackwardCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/StaticIndexBackwardCompatibilityIT.java index 794aea85487..3884d3475e1 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/StaticIndexBackwardCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/StaticIndexBackwardCompatibilityIT.java @@ -36,7 +36,7 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; public class StaticIndexBackwardCompatibilityIT extends ESIntegTestCase { public void loadIndex(String index, Object... settings) throws Exception { - logger.info("Checking static index " + index); + logger.info("Checking static index {}", index); Settings nodeSettings = prepareBackwardsDataDir(getDataPath(index + ".zip"), settings); internalCluster().startNode(nodeSettings); ensureGreen(index); diff --git a/core/src/test/java/org/elasticsearch/cli/CommandTests.java b/core/src/test/java/org/elasticsearch/cli/CommandTests.java new file mode 100644 index 00000000000..153bd4600b9 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/cli/CommandTests.java @@ -0,0 +1,123 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cli; + +import joptsimple.OptionSet; +import org.elasticsearch.test.ESTestCase; + +public class CommandTests extends ESTestCase { + + static class UserErrorCommand extends Command { + UserErrorCommand() { + super("Throws a user error"); + } + @Override + protected void execute(Terminal terminal, OptionSet options) throws Exception { + throw new UserError(ExitCodes.DATA_ERROR, "Bad input"); + } + } + + static class NoopCommand extends Command { + boolean executed = false; + NoopCommand() { + super("Does nothing"); + } + @Override + protected void execute(Terminal terminal, OptionSet options) throws Exception { + terminal.println("Normal output"); + terminal.println(Terminal.Verbosity.SILENT, "Silent output"); + terminal.println(Terminal.Verbosity.VERBOSE, "Verbose output"); + executed = true; + } + @Override + protected void printAdditionalHelp(Terminal terminal) { + terminal.println("Some extra help"); + } + } + + public void testHelp() throws Exception { + NoopCommand command = new NoopCommand(); + MockTerminal terminal = new MockTerminal(); + String[] args = {"-h"}; + int status = command.main(args, terminal); + String output = terminal.getOutput(); + assertEquals(output, ExitCodes.OK, status); + assertTrue(output, output.contains("Does nothing")); + assertTrue(output, output.contains("Some extra help")); + assertFalse(command.executed); + + command = new NoopCommand(); + String[] args2 = {"--help"}; + status = command.main(args2, terminal); + output = terminal.getOutput(); + assertEquals(output, ExitCodes.OK, status); + assertTrue(output, output.contains("Does nothing")); + assertTrue(output, output.contains("Some extra help")); + assertFalse(command.executed); + } + + public void testVerbositySilentAndVerbose() throws Exception { + MockTerminal terminal = new MockTerminal(); + NoopCommand command = new NoopCommand(); + String[] args = {"-v", "-s"}; + UserError e = expectThrows(UserError.class, () -> { + command.mainWithoutErrorHandling(args, terminal); + }); + assertTrue(e.getMessage(), e.getMessage().contains("Cannot specify -s and -v together")); + } + + public void testSilentVerbosity() throws Exception { + MockTerminal terminal = new MockTerminal(); + NoopCommand command = new NoopCommand(); + String[] args = {"-s"}; + command.main(args, terminal); + String output = terminal.getOutput(); + assertTrue(output, output.contains("Silent output")); + } + + public void testNormalVerbosity() throws Exception { + MockTerminal terminal = new MockTerminal(); + terminal.setVerbosity(Terminal.Verbosity.SILENT); + NoopCommand command = new NoopCommand(); + String[] args = {}; + command.main(args, terminal); + String output = terminal.getOutput(); + assertTrue(output, output.contains("Normal output")); + } + + public void testVerboseVerbosity() throws Exception { + MockTerminal terminal = new MockTerminal(); + NoopCommand command = new NoopCommand(); + String[] args = {"-v"}; + command.main(args, terminal); + String output = terminal.getOutput(); + assertTrue(output, output.contains("Verbose output")); + } + + public void testUserError() throws Exception { + MockTerminal terminal = new MockTerminal(); + UserErrorCommand command = new UserErrorCommand(); + String[] args = {}; + int status = command.main(args, terminal); + String output = terminal.getOutput(); + assertEquals(output, ExitCodes.DATA_ERROR, status); + assertTrue(output, output.contains("ERROR: Bad input")); + } +} diff --git a/core/src/test/java/org/elasticsearch/cli/MultiCommandTests.java 
b/core/src/test/java/org/elasticsearch/cli/MultiCommandTests.java new file mode 100644 index 00000000000..4f91d378440 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/cli/MultiCommandTests.java @@ -0,0 +1,105 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cli; + +import joptsimple.OptionSet; +import org.junit.Before; + +public class MultiCommandTests extends CommandTestCase { + + static class DummyMultiCommand extends MultiCommand { + DummyMultiCommand() { + super("A dummy multi command"); + } + } + + static class DummySubCommand extends Command { + DummySubCommand() { + super("A dummy subcommand"); + } + @Override + protected void execute(Terminal terminal, OptionSet options) throws Exception { + terminal.println("Arguments: " + options.nonOptionArguments().toString()); + } + } + + DummyMultiCommand multiCommand; + + @Before + public void setupCommand() { + multiCommand = new DummyMultiCommand(); + } + + @Override + protected Command newCommand() { + return multiCommand; + } + + public void testNoCommandsConfigured() throws Exception { + IllegalStateException e = expectThrows(IllegalStateException.class, () -> { + execute(); + }); + assertEquals("No subcommands configured", e.getMessage()); + } + + public void testUnknownCommand() throws Exception { + multiCommand.subcommands.put("something", new DummySubCommand()); + UserError e = expectThrows(UserError.class, () -> { + execute("somethingelse"); + }); + assertEquals(ExitCodes.USAGE, e.exitCode); + assertEquals("Unknown command [somethingelse]", e.getMessage()); + } + + public void testMissingCommand() throws Exception { + multiCommand.subcommands.put("command1", new DummySubCommand()); + UserError e = expectThrows(UserError.class, () -> { + execute(); + }); + assertEquals(ExitCodes.USAGE, e.exitCode); + assertEquals("Missing command", e.getMessage()); + } + + public void testHelp() throws Exception { + multiCommand.subcommands.put("command1", new DummySubCommand()); + multiCommand.subcommands.put("command2", new DummySubCommand()); + execute("-h"); + String output = terminal.getOutput(); + assertTrue(output, output.contains("command1")); + assertTrue(output, output.contains("command2")); + } + + public void testSubcommandHelp() throws Exception { + multiCommand.subcommands.put("command1", new DummySubCommand()); + multiCommand.subcommands.put("command2", new DummySubCommand()); + execute("command2", "-h"); + String output = terminal.getOutput(); + assertFalse(output, output.contains("command1")); + assertTrue(output, output.contains("A dummy subcommand")); + } + + public void testSubcommandArguments() throws Exception { + multiCommand.subcommands.put("command1", new DummySubCommand()); + execute("command1", "foo", "bar"); + String output = 
terminal.getOutput(); + assertFalse(output, output.contains("command1")); + assertTrue(output, output.contains("Arguments: [foo, bar]")); + } +} diff --git a/core/src/test/java/org/elasticsearch/common/cli/TerminalTests.java b/core/src/test/java/org/elasticsearch/cli/TerminalTests.java similarity index 94% rename from core/src/test/java/org/elasticsearch/common/cli/TerminalTests.java rename to core/src/test/java/org/elasticsearch/cli/TerminalTests.java index deb64e906b4..6673bdbc858 100644 --- a/core/src/test/java/org/elasticsearch/common/cli/TerminalTests.java +++ b/core/src/test/java/org/elasticsearch/cli/TerminalTests.java @@ -17,9 +17,11 @@ * under the License. */ -package org.elasticsearch.common.cli; +package org.elasticsearch.cli; -public class TerminalTests extends CliToolTestCase { +import org.elasticsearch.test.ESTestCase; + +public class TerminalTests extends ESTestCase { public void testVerbosity() throws Exception { MockTerminal terminal = new MockTerminal(); terminal.setVerbosity(Terminal.Verbosity.SILENT); @@ -48,7 +50,7 @@ public class TerminalTests extends CliToolTestCase { logTerminal.println(verbosity, text); String output = logTerminal.getOutput(); assertTrue(output, output.contains(text)); - logTerminal.resetOutput(); + logTerminal.reset(); } private void assertNotPrinted(MockTerminal logTerminal, Terminal.Verbosity verbosity, String text) throws Exception { diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java b/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java index 42f0e3a0601..f1a4496b136 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java @@ -35,6 +35,7 @@ import org.elasticsearch.common.inject.ModuleTestCase; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsModule; @@ -71,7 +72,7 @@ public class ClusterModuleTests extends ModuleTestCase { public void testRegisterClusterDynamicSetting() { SettingsModule module = new SettingsModule(Settings.EMPTY); - module.registerSetting(Setting.boolSetting("foo.bar", false, true, Setting.Scope.CLUSTER)); + module.registerSetting(Setting.boolSetting("foo.bar", false, Property.Dynamic, Property.NodeScope)); assertInstanceBinding(module, ClusterSettings.class, service -> service.hasDynamicSetting("foo.bar")); } @@ -86,7 +87,7 @@ public class ClusterModuleTests extends ModuleTestCase { public void testRegisterIndexDynamicSetting() { SettingsModule module = new SettingsModule(Settings.EMPTY); - module.registerSetting(Setting.boolSetting("foo.bar", false, true, Setting.Scope.INDEX)); + module.registerSetting(Setting.boolSetting("foo.bar", false, Property.Dynamic, Property.IndexScope)); assertInstanceBinding(module, IndexScopedSettings.class, service -> service.hasDynamicSetting("foo.bar")); } diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java b/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java index 813557e314b..351959460b1 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java @@ -608,13 +608,13 @@ public class ClusterServiceIT extends ESIntegTestCase { @Override 
public void onMaster() { - logger.info("on master [" + clusterService.localNode() + "]"); + logger.info("on master [{}]", clusterService.localNode()); master = true; } @Override public void offMaster() { - logger.info("off master [" + clusterService.localNode() + "]"); + logger.info("off master [{}]", clusterService.localNode()); master = false; } diff --git a/core/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java b/core/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java index 370f1464fd2..13b1d40b5d3 100644 --- a/core/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java @@ -235,7 +235,7 @@ public class NoMasterNodeIT extends ESIntegTestCase { ensureSearchable("test1", "test2"); ClusterStateResponse clusterState = client().admin().cluster().prepareState().get(); - logger.info("Cluster state:\n" + clusterState.getState().prettyPrint()); + logger.info("Cluster state:\n{}", clusterState.getState().prettyPrint()); internalCluster().stopRandomDataNode(); assertTrue(awaitBusy(() -> { diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java b/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java index 94336d23623..da6f270a79d 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java @@ -163,7 +163,7 @@ public class PrimaryAllocationIT extends ESIntegTestCase { for (IntObjectCursor<List<IndicesShardStoresResponse.StoreStatus>> shardStoreStatuses : storeStatuses) { int shardId = shardStoreStatuses.key; IndicesShardStoresResponse.StoreStatus storeStatus = randomFrom(shardStoreStatuses.value); - logger.info("--> adding allocation command for shard " + shardId); + logger.info("--> adding allocation command for shard {}", shardId); // force allocation based on node id if (useStaleReplica) { rerouteBuilder.add(new AllocateStalePrimaryAllocationCommand("test", shardId, storeStatus.getNode().getId(), true)); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java index 741d62d74e6..40e24338f00 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java @@ -63,7 +63,7 @@ public class RoutingTableTests extends ESAllocationTestCase { this.numberOfReplicas = randomIntBetween(1, 5); this.shardsPerIndex = this.numberOfShards * (this.numberOfReplicas + 1); this.totalNumberOfShards = this.shardsPerIndex * 2; - logger.info("Setup test with " + this.numberOfShards + " shards and " + this.numberOfReplicas + " replicas."); + logger.info("Setup test with {} shards and {} replicas.", this.numberOfShards, this.numberOfReplicas); this.emptyRoutingTable = new RoutingTable.Builder().build(); MetaData metaData = MetaData.builder() .put(createIndexMetaData(TEST_INDEX_1)) @@ -81,7 +81,7 @@ public class RoutingTableTests extends ESAllocationTestCase { * puts primary shard routings into initializing state */ private void initPrimaries() { - logger.info("adding " + (this.numberOfReplicas + 1) + " nodes and performing rerouting"); + logger.info("adding {} nodes and performing rerouting", this.numberOfReplicas + 1); Builder discoBuilder = DiscoveryNodes.builder(); for (int i = 0; i < this.numberOfReplicas + 1; i++) { discoBuilder = discoBuilder.put(newNode("node" + i)); @@ -95,7 +95,7 @@ 
public class RoutingTableTests extends ESAllocationTestCase { private void startInitializingShards(String index) { this.clusterState = ClusterState.builder(clusterState).routingTable(this.testRoutingTable).build(); - logger.info("start primary shards for index " + index); + logger.info("start primary shards for index {}", index); RoutingAllocation.Result rerouteResult = ALLOCATION_SERVICE.applyStartedShards(this.clusterState, this.clusterState.getRoutingNodes().shardsWithState(index, INITIALIZING)); this.clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build(); this.testRoutingTable = rerouteResult.routingTable(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java index 91ba1f4999c..1c5f77ce408 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java @@ -301,7 +301,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase { RoutingTable routingTable = routingTableBuilder.build(); - logger.info("start " + numberOfNodes + " nodes"); + logger.info("start {} nodes", numberOfNodes); DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(); for (int i = 0; i < numberOfNodes; i++) { nodes.put(newNode("node" + i)); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java index 8810fc47395..18f24504619 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java @@ -221,18 +221,10 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); - for (ShardRouting shard : clusterState.getRoutingNodes().shardsWithState(INITIALIZING)) { - logger.info(shard.toString()); - } - for (ShardRouting shard : clusterState.getRoutingNodes().shardsWithState(STARTED)) { - logger.info(shard.toString()); - } - for (ShardRouting shard : clusterState.getRoutingNodes().shardsWithState(RELOCATING)) { - logger.info(shard.toString()); - } - for (ShardRouting shard : clusterState.getRoutingNodes().shardsWithState(UNASSIGNED)) { - logger.info(shard.toString()); - } + logger.info("Initializing shards: {}", clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + logger.info("Started shards: {}", clusterState.getRoutingNodes().shardsWithState(STARTED)); + logger.info("Relocating shards: {}", clusterState.getRoutingNodes().shardsWithState(RELOCATING)); + logger.info("Unassigned shards: {}", clusterState.getRoutingNodes().shardsWithState(UNASSIGNED)); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(5)); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/CatAllocationTestCase.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/CatAllocationTestCase.java index 1ba0c063255..be403510195 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/CatAllocationTestCase.java +++ 
b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/CatAllocationTestCase.java @@ -147,12 +147,12 @@ public abstract class CatAllocationTestCase extends ESAllocationTestCase { if (initializing.isEmpty()) { break; } - logger.debug(initializing.toString()); + logger.debug("Initializing shards: {}", initializing); numRelocations += initializing.size(); routingTable = strategy.applyStartedShards(clusterState, initializing).routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); } - logger.debug("--> num relocations to get balance: " + numRelocations); + logger.debug("--> num relocations to get balance: {}", numRelocations); return clusterState; } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java index 813bee8f80e..3ec8df5cea6 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java @@ -409,14 +409,16 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { String fromId = r.currentNodeId(); assertThat(fromId, notNullValue()); assertThat(toId, notNullValue()); - logger.trace("From: " + fromId + " with Version: " + routingNodes.node(fromId).node().version() + " to: " + toId + " with Version: " + routingNodes.node(toId).node().version()); + logger.trace("From: {} with Version: {} to: {} with Version: {}", fromId, routingNodes.node(fromId).node().version(), + toId, routingNodes.node(toId).node().version()); assertTrue(routingNodes.node(toId).node().version().onOrAfter(routingNodes.node(fromId).node().version())); } else { ShardRouting primary = routingNodes.activePrimary(r); assertThat(primary, notNullValue()); String fromId = primary.currentNodeId(); String toId = r.relocatingNodeId(); - logger.trace("From: " + fromId + " with Version: " + routingNodes.node(fromId).node().version() + " to: " + toId + " with Version: " + routingNodes.node(toId).node().version()); + logger.trace("From: {} with Version: {} to: {} with Version: {}", fromId, routingNodes.node(fromId).node().version(), + toId, routingNodes.node(toId).node().version()); assertTrue(routingNodes.node(toId).node().version().onOrAfter(routingNodes.node(fromId).node().version())); } } @@ -428,7 +430,8 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { assertThat(primary, notNullValue()); String fromId = primary.currentNodeId(); String toId = r.currentNodeId(); - logger.trace("From: " + fromId + " with Version: " + routingNodes.node(fromId).node().version() + " to: " + toId + " with Version: " + routingNodes.node(toId).node().version()); + logger.trace("From: {} with Version: {} to: {} with Version: {}", fromId, routingNodes.node(fromId).node().version(), + toId, routingNodes.node(toId).node().version()); assertTrue(routingNodes.node(toId).node().version().onOrAfter(routingNodes.node(fromId).node().version())); } } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java index 0830747a9dd..e220c8eb0f6 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java +++ 
b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java @@ -212,7 +212,7 @@ public class ShardsLimitAllocationTests extends ESAllocationTestCase { assertThat(shardRouting.getIndexName(), equalTo("test1")); } - logger.info("update " + ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey() + " for test, see that things move"); + logger.info("update {} for test, see that things move", ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey()); metaData = MetaData.builder(metaData) .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5) diff --git a/core/src/test/java/org/elasticsearch/cluster/settings/SettingsFilteringIT.java b/core/src/test/java/org/elasticsearch/cluster/settings/SettingsFilteringIT.java index ed03c918c31..e61bbc5f719 100644 --- a/core/src/test/java/org/elasticsearch/cluster/settings/SettingsFilteringIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/settings/SettingsFilteringIT.java @@ -22,19 +22,15 @@ package org.elasticsearch.cluster.settings; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; -import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import java.util.Collection; -import java.util.Collections; import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -50,6 +46,11 @@ public class SettingsFilteringIT extends ESIntegTestCase { } public static class SettingsFilteringPlugin extends Plugin { + public static final Setting<Boolean> SOME_NODE_SETTING = + Setting.boolSetting("some.node.setting", false, Property.NodeScope, Property.Filtered); + public static final Setting<Boolean> SOME_OTHER_NODE_SETTING = + Setting.boolSetting("some.other.node.setting", false, Property.NodeScope); + /** * The name of the plugin. 
*/ @@ -72,10 +73,9 @@ public class SettingsFilteringIT extends ESIntegTestCase { } public void onModule(SettingsModule module) { - module.registerSetting(Setting.groupSetting("index.filter_test.", false, Setting.Scope.INDEX)); - module.registerSetting(Setting.boolSetting("some.node.setting", false, false, Setting.Scope.CLUSTER)); - module.registerSetting(Setting.boolSetting("some.other.node.setting", false, false, Setting.Scope.CLUSTER)); - module.registerSettingsFilter("some.node.setting"); + module.registerSetting(SOME_NODE_SETTING); + module.registerSetting(SOME_OTHER_NODE_SETTING); + module.registerSetting(Setting.groupSetting("index.filter_test.", Property.IndexScope)); module.registerSettingsFilter("index.filter_test.foo"); module.registerSettingsFilter("index.filter_test.bar*"); } @@ -104,8 +104,8 @@ public class SettingsFilteringIT extends ESIntegTestCase { for(NodeInfo info : nodeInfos.getNodes()) { Settings settings = info.getSettings(); assertNotNull(settings); - assertNull(settings.get("some.node.setting")); - assertTrue(settings.getAsBoolean("some.other.node.setting", false)); + assertNull(settings.get(SettingsFilteringPlugin.SOME_NODE_SETTING.getKey())); + assertTrue(settings.getAsBoolean(SettingsFilteringPlugin.SOME_OTHER_NODE_SETTING.getKey(), false)); assertEquals(settings.get("node.name"), info.getNode().getName()); } } diff --git a/core/src/test/java/org/elasticsearch/common/cache/CacheTests.java b/core/src/test/java/org/elasticsearch/common/cache/CacheTests.java index 921c66f7acb..3b88a3bdcfe 100644 --- a/core/src/test/java/org/elasticsearch/common/cache/CacheTests.java +++ b/core/src/test/java/org/elasticsearch/common/cache/CacheTests.java @@ -55,7 +55,7 @@ public class CacheTests extends ESTestCase { public void setUp() throws Exception { super.setUp(); numberOfEntries = randomIntBetween(1000, 10000); - logger.debug("numberOfEntries: " + numberOfEntries); + logger.debug("numberOfEntries: {}", numberOfEntries); } // cache some entries, then randomly lookup keys that do not exist, then check the stats diff --git a/core/src/test/java/org/elasticsearch/common/logging/LoggingConfigurationTests.java b/core/src/test/java/org/elasticsearch/common/logging/LoggingConfigurationTests.java index 0cca19d33bf..5c812cca0a7 100644 --- a/core/src/test/java/org/elasticsearch/common/logging/LoggingConfigurationTests.java +++ b/core/src/test/java/org/elasticsearch/common/logging/LoggingConfigurationTests.java @@ -27,7 +27,7 @@ import java.util.Arrays; import org.apache.log4j.Appender; import org.apache.log4j.Logger; -import org.elasticsearch.common.cli.MockTerminal; +import org.elasticsearch.cli.MockTerminal; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.node.internal.InternalSettingsPreparer; diff --git a/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java b/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java index a190de5b702..60848d0d459 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.elasticsearch.common.logging.ESLoggerFactory; +import 
org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.index.IndexModule; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.TransportService; @@ -41,8 +42,8 @@ import java.util.function.Function; public class ScopedSettingsTests extends ESTestCase { public void testAddConsumer() { - Setting<Integer> testSetting = Setting.intSetting("foo.bar", 1, true, Setting.Scope.CLUSTER); - Setting<Integer> testSetting2 = Setting.intSetting("foo.bar.baz", 1, true, Setting.Scope.CLUSTER); + Setting<Integer> testSetting = Setting.intSetting("foo.bar", 1, Property.Dynamic, Property.NodeScope); + Setting<Integer> testSetting2 = Setting.intSetting("foo.bar.baz", 1, Property.Dynamic, Property.NodeScope); AbstractScopedSettings service = new ClusterSettings(Settings.EMPTY, Collections.singleton(testSetting)); AtomicInteger consumer = new AtomicInteger(); @@ -69,8 +70,8 @@ public class ScopedSettingsTests extends ESTestCase { } public void testApply() { - Setting<Integer> testSetting = Setting.intSetting("foo.bar", 1, true, Setting.Scope.CLUSTER); - Setting<Integer> testSetting2 = Setting.intSetting("foo.bar.baz", 1, true, Setting.Scope.CLUSTER); + Setting<Integer> testSetting = Setting.intSetting("foo.bar", 1, Property.Dynamic, Property.NodeScope); + Setting<Integer> testSetting2 = Setting.intSetting("foo.bar.baz", 1, Property.Dynamic, Property.NodeScope); AbstractScopedSettings service = new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList(testSetting, testSetting2))); AtomicInteger consumer = new AtomicInteger(); @@ -139,7 +140,10 @@ public class ScopedSettingsTests extends ESTestCase { } public void testIsDynamic(){ - ClusterSettings settings = new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList(Setting.intSetting("foo.bar", 1, true, Setting.Scope.CLUSTER), Setting.intSetting("foo.bar.baz", 1, false, Setting.Scope.CLUSTER)))); + ClusterSettings settings = + new ClusterSettings(Settings.EMPTY, + new HashSet<>(Arrays.asList(Setting.intSetting("foo.bar", 1, Property.Dynamic, Property.NodeScope), + Setting.intSetting("foo.bar.baz", 1, Property.NodeScope)))); assertFalse(settings.hasDynamicSetting("foo.bar.baz")); assertTrue(settings.hasDynamicSetting("foo.bar")); assertNotNull(settings.get("foo.bar.baz")); @@ -150,8 +154,8 @@ public class ScopedSettingsTests extends ESTestCase { } public void testDiff() throws IOException { - Setting<Integer> foobarbaz = Setting.intSetting("foo.bar.baz", 1, false, Setting.Scope.CLUSTER); - Setting<Integer> foobar = Setting.intSetting("foo.bar", 1, true, Setting.Scope.CLUSTER); + Setting<Integer> foobarbaz = Setting.intSetting("foo.bar.baz", 1, Property.NodeScope); + Setting<Integer> foobar = Setting.intSetting("foo.bar", 1, Property.Dynamic, Property.NodeScope); ClusterSettings settings = new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList(foobar, foobarbaz))); Settings diff = settings.diff(Settings.builder().put("foo.bar", 5).build(), Settings.EMPTY); assertEquals(diff.getAsMap().size(), 1); @@ -247,22 +251,22 @@ public class ScopedSettingsTests extends ESTestCase { try { new IndexScopedSettings( - Settings.EMPTY, Collections.singleton(Setting.groupSetting("boo .", false, Setting.Scope.INDEX))); + Settings.EMPTY, Collections.singleton(Setting.groupSetting("boo .", Property.IndexScope))); fail(); } catch (IllegalArgumentException e) { assertEquals("illegal settings key: [boo .]", e.getMessage()); } new IndexScopedSettings( - Settings.EMPTY, Collections.singleton(Setting.groupSetting("boo.", false, Setting.Scope.INDEX))); + Settings.EMPTY, Collections.singleton(Setting.groupSetting("boo.", Property.IndexScope))); try { new IndexScopedSettings( - Settings.EMPTY, Collections.singleton(Setting.boolSetting("boo.", true, false, Setting.Scope.INDEX))); + Settings.EMPTY, Collections.singleton(Setting.boolSetting("boo.", true, Property.IndexScope))); fail(); } catch (IllegalArgumentException e) { assertEquals("illegal settings key: [boo.]", e.getMessage()); } new IndexScopedSettings( - Settings.EMPTY, Collections.singleton(Setting.boolSetting("boo", true, false, Setting.Scope.INDEX))); + Settings.EMPTY, Collections.singleton(Setting.boolSetting("boo", true, Property.IndexScope))); } public void testLoggingUpdates() { @@ -313,9 +317,9 @@ public class ScopedSettingsTests extends ESTestCase { public void testOverlappingComplexMatchSettings() { Set<Setting<?>> settings = new LinkedHashSet<>(2); final boolean groupFirst = randomBoolean(); - final Setting<Settings> groupSetting = Setting.groupSetting("foo.", false, Setting.Scope.CLUSTER); - final Setting<List<String>> listSetting = Setting.listSetting("foo.bar", Collections.emptyList(), Function.identity(), false, - Setting.Scope.CLUSTER); + final Setting<Settings> groupSetting = Setting.groupSetting("foo.", Property.NodeScope); + final Setting<List<String>> listSetting = + Setting.listSetting("foo.bar", Collections.emptyList(), Function.identity(), Property.NodeScope); settings.add(groupFirst ? groupSetting : listSetting); settings.add(groupFirst ? listSetting : groupSetting); 
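The ScopedSettingsTests hunks above show the shape of the settings API change running through this commit: the old factory methods took a boolean dynamic flag plus a single Setting.Scope enum, while the new ones take Setting.Property varargs so each property is declared independently. A minimal sketch of the before/after, assuming only the APIs visible in these hunks (the setting names are illustrative):

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;

class PropertyMigrationSketch {
    // Old style: Setting.intSetting("foo.bar", 1, true, Setting.Scope.CLUSTER)
    // New style: Dynamic and NodeScope are separate, explicit properties.
    static final Setting<Integer> FOO_BAR =
            Setting.intSetting("foo.bar", 1, Property.Dynamic, Property.NodeScope);
    // Filtering is also a property now, instead of a separate registerSettingsFilter call,
    // as the SettingsFilteringIT hunk above demonstrates.
    static final Setting<Boolean> HIDDEN =
            Setting.boolSetting("some.hidden.setting", false, Property.NodeScope, Property.Filtered);
}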
diff --git a/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java index df2014f7855..14fdcb1e0ac 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.common.settings; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESTestCase; @@ -28,24 +29,29 @@ import java.util.Collections; import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; public class SettingTests extends ESTestCase { public void testGet() { - Setting<Boolean> booleanSetting = Setting.boolSetting("foo.bar", false, true, Setting.Scope.CLUSTER); + Setting<Boolean> booleanSetting = Setting.boolSetting("foo.bar", false, Property.Dynamic, Property.NodeScope); assertFalse(booleanSetting.get(Settings.EMPTY)); assertFalse(booleanSetting.get(Settings.builder().put("foo.bar", false).build())); assertTrue(booleanSetting.get(Settings.builder().put("foo.bar", true).build())); } public void testByteSize() { - Setting<ByteSizeValue> byteSizeValueSetting = Setting.byteSizeSetting("a.byte.size", new ByteSizeValue(1024), true, Setting.Scope.CLUSTER); + Setting<ByteSizeValue> byteSizeValueSetting = + Setting.byteSizeSetting("a.byte.size", new ByteSizeValue(1024), Property.Dynamic, Property.NodeScope); assertFalse(byteSizeValueSetting.isGroupSetting()); ByteSizeValue byteSizeValue = byteSizeValueSetting.get(Settings.EMPTY); assertEquals(byteSizeValue.bytes(), 1024); - byteSizeValueSetting = Setting.byteSizeSetting("a.byte.size", s -> "2048b", true, Setting.Scope.CLUSTER); + byteSizeValueSetting = Setting.byteSizeSetting("a.byte.size", s -> "2048b", Property.Dynamic, Property.NodeScope); byteSizeValue = byteSizeValueSetting.get(Settings.EMPTY); assertEquals(byteSizeValue.bytes(), 2048); 
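The SettingTests hunks that follow exercise the update plumbing behind Property.Dynamic. Roughly, and assuming only the calls visible in these tests (logger is the test's ESLogger; the setting name is illustrative), the flow is:

Setting<Boolean> flag = Setting.boolSetting("foo.bar", false, Property.Dynamic, Property.NodeScope);
AtomicReference<Boolean> current = new AtomicReference<>(null);
// newUpdater wires a consumer to the setting; apply returns true and fires the
// consumer only when the value actually changed between the two Settings views.
ClusterSettings.SettingUpdater<Boolean> updater = flag.newUpdater(current::set, logger);
updater.apply(Settings.builder().put("foo.bar", true).build(), Settings.EMPTY); // current -> true
updater.apply(Settings.EMPTY, Settings.EMPTY);                                  // no change, returns false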
@@ -64,7 +70,7 @@ public class SettingTests extends ESTestCase { } public void testSimpleUpdate() { - Setting<Boolean> booleanSetting = Setting.boolSetting("foo.bar", false, true, Setting.Scope.CLUSTER); + Setting<Boolean> booleanSetting = Setting.boolSetting("foo.bar", false, Property.Dynamic, Property.NodeScope); AtomicReference<Boolean> atomicBoolean = new AtomicReference<>(null); ClusterSettings.SettingUpdater<Boolean> settingUpdater = booleanSetting.newUpdater(atomicBoolean::set, logger); Settings build = Settings.builder().put("foo.bar", false).build(); @@ -85,7 +91,7 @@ } public void testUpdateNotDynamic() { - Setting<Boolean> booleanSetting = Setting.boolSetting("foo.bar", false, false, Setting.Scope.CLUSTER); + Setting<Boolean> booleanSetting = Setting.boolSetting("foo.bar", false, Property.NodeScope); assertFalse(booleanSetting.isGroupSetting()); AtomicReference<Boolean> atomicBoolean = new AtomicReference<>(null); try { @@ -97,7 +103,7 @@ } public void testUpdaterIsIsolated() { - Setting<Boolean> booleanSetting = Setting.boolSetting("foo.bar", false, true, Setting.Scope.CLUSTER); + Setting<Boolean> booleanSetting = Setting.boolSetting("foo.bar", false, Property.Dynamic, Property.NodeScope); AtomicReference<Boolean> ab1 = new AtomicReference<>(null); AtomicReference<Boolean> ab2 = new AtomicReference<>(null); ClusterSettings.SettingUpdater<Boolean> settingUpdater = booleanSetting.newUpdater(ab1::set, logger); @@ -108,24 +114,28 @@ public void testDefault() { TimeValue defautlValue = TimeValue.timeValueMillis(randomIntBetween(0, 1000000)); - Setting<TimeValue> setting = Setting.positiveTimeSetting("my.time.value", defautlValue, randomBoolean(), Setting.Scope.CLUSTER); + Setting<TimeValue> setting = + Setting.positiveTimeSetting("my.time.value", defautlValue, Property.NodeScope); assertFalse(setting.isGroupSetting()); String aDefault = setting.getDefaultRaw(Settings.EMPTY); assertEquals(defautlValue.millis() + "ms", aDefault); assertEquals(defautlValue.millis(), setting.get(Settings.EMPTY).millis()); assertEquals(defautlValue, setting.getDefault(Settings.EMPTY)); - Setting<String> secondaryDefault = new Setting<>("foo.bar", (s) -> s.get("old.foo.bar", "some_default"), (s) -> s, randomBoolean(), Setting.Scope.CLUSTER); + Setting<String> secondaryDefault = + new Setting<>("foo.bar", (s) -> s.get("old.foo.bar", "some_default"), Function.identity(), Property.NodeScope); assertEquals("some_default", secondaryDefault.get(Settings.EMPTY)); assertEquals("42", secondaryDefault.get(Settings.builder().put("old.foo.bar", 42).build())); - Setting<String> secondaryDefaultViaSettings = new Setting<>("foo.bar", secondaryDefault, (s) -> s, randomBoolean(), Setting.Scope.CLUSTER); + Setting<String> secondaryDefaultViaSettings = + new Setting<>("foo.bar", secondaryDefault, Function.identity(), Property.NodeScope); assertEquals("some_default", secondaryDefaultViaSettings.get(Settings.EMPTY)); assertEquals("42", secondaryDefaultViaSettings.get(Settings.builder().put("old.foo.bar", 42).build())); } public void testComplexType() { AtomicReference<ComplexType> ref = new AtomicReference<>(null); - Setting<ComplexType> setting = new Setting<>("foo.bar", (s) -> "", (s) -> new ComplexType(s), true, Setting.Scope.CLUSTER); + Setting<ComplexType> setting = new Setting<>("foo.bar", (s) -> "", (s) -> new ComplexType(s), + Property.Dynamic, Property.NodeScope); assertFalse(setting.isGroupSetting()); ref.set(setting.get(Settings.EMPTY)); ComplexType type = ref.get(); 
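The testType hunk below replaces the old single-valued getScope() accessor: with properties, scope is probed per kind rather than compared against one enum. A short sketch under the same assumptions (names illustrative):

Setting<Integer> indexSetting = Setting.intSetting("index.example", 1, Property.Dynamic, Property.IndexScope);
// Before: assertEquals(indexSetting.getScope(), Setting.Scope.INDEX);
// After: independent predicates, one per scope property.
assertThat(indexSetting.hasIndexScope(), is(true));
assertThat(indexSetting.hasNodeScope(), is(false));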
@@ -146,15 +156,17 @@ public class SettingTests extends ESTestCase { } public void testType() { - Setting<Integer> integerSetting = Setting.intSetting("foo.int.bar", 1, true, Setting.Scope.CLUSTER); - assertEquals(integerSetting.getScope(), Setting.Scope.CLUSTER); - integerSetting = Setting.intSetting("foo.int.bar", 1, true, Setting.Scope.INDEX); - assertEquals(integerSetting.getScope(), Setting.Scope.INDEX); + Setting<Integer> integerSetting = Setting.intSetting("foo.int.bar", 1, Property.Dynamic, Property.NodeScope); + assertThat(integerSetting.hasNodeScope(), is(true)); + assertThat(integerSetting.hasIndexScope(), is(false)); + integerSetting = Setting.intSetting("foo.int.bar", 1, Property.Dynamic, Property.IndexScope); + assertThat(integerSetting.hasIndexScope(), is(true)); + assertThat(integerSetting.hasNodeScope(), is(false)); } public void testGroups() { AtomicReference<Settings> ref = new AtomicReference<>(null); - Setting<Settings> setting = Setting.groupSetting("foo.bar.", true, Setting.Scope.CLUSTER); + Setting<Settings> setting = Setting.groupSetting("foo.bar.", Property.Dynamic, Property.NodeScope); assertTrue(setting.isGroupSetting()); ClusterSettings.SettingUpdater<Settings> settingUpdater = setting.newUpdater(ref::set, logger); @@ -232,8 +244,8 @@ public class SettingTests extends ESTestCase { public void testComposite() { Composite c = new Composite(); - Setting<Integer> a = Setting.intSetting("foo.int.bar.a", 1, true, Setting.Scope.CLUSTER); - Setting<Integer> b = Setting.intSetting("foo.int.bar.b", 1, true, Setting.Scope.CLUSTER); + Setting<Integer> a = Setting.intSetting("foo.int.bar.a", 1, Property.Dynamic, Property.NodeScope); + Setting<Integer> b = Setting.intSetting("foo.int.bar.b", 1, Property.Dynamic, Property.NodeScope); ClusterSettings.SettingUpdater<Tuple<Integer, Integer>> settingUpdater = Setting.compoundUpdater(c::set, a, b, logger); assertFalse(settingUpdater.apply(Settings.EMPTY, Settings.EMPTY)); assertNull(c.a); @@ -261,7 +273,8 @@ public class SettingTests extends ESTestCase { } public void testListSettings() { - Setting<List<String>> listSetting = Setting.listSetting("foo.bar", Arrays.asList("foo,bar"), (s) -> s.toString(), true, Setting.Scope.CLUSTER); + Setting<List<String>> listSetting = Setting.listSetting("foo.bar", Arrays.asList("foo,bar"), (s) -> s.toString(), + Property.Dynamic, Property.NodeScope); List<String> value = listSetting.get(Settings.EMPTY); assertEquals(1, value.size()); assertEquals("foo,bar", value.get(0)); @@ -300,7 +313,8 @@ public class SettingTests extends ESTestCase { assertEquals(1, ref.get().size()); assertEquals("foo,bar", ref.get().get(0)); - Setting<List<Integer>> otherSettings = Setting.listSetting("foo.bar", Collections.emptyList(), Integer::parseInt, true, Setting.Scope.CLUSTER); + Setting<List<Integer>> otherSettings = Setting.listSetting("foo.bar", Collections.emptyList(), Integer::parseInt, + Property.Dynamic, Property.NodeScope); List<Integer> defaultValue = otherSettings.get(Settings.EMPTY); assertEquals(0, defaultValue.size()); List<Integer> intValues = otherSettings.get(Settings.builder().put("foo.bar", "0,1,2,3").build()); @@ -309,7 +323,8 @@ public class SettingTests extends ESTestCase { assertEquals(i, intValues.get(i).intValue()); } - Setting<List<String>> settingWithFallback = Setting.listSetting("foo.baz", listSetting, s -> s, true, Setting.Scope.CLUSTER); + Setting<List<String>> settingWithFallback = Setting.listSetting("foo.baz", listSetting, Function.identity(), + Property.Dynamic, Property.NodeScope); value = settingWithFallback.get(Settings.EMPTY); assertEquals(1, value.size()); assertEquals("foo,bar", value.get(0)); 
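The list-setting hunks above keep their behavior across the migration; notably, a setting can fall back to another one when it is not configured. A condensed sketch reusing the exact values from these tests:

Setting<List<String>> primary = Setting.listSetting("foo.bar", Arrays.asList("foo,bar"), (s) -> s.toString(),
        Property.Dynamic, Property.NodeScope);
Setting<List<String>> withFallback = Setting.listSetting("foo.baz", primary, Function.identity(),
        Property.Dynamic, Property.NodeScope);
// With no "foo.baz" configured, the value resolves through "foo.bar"'s default:
List<String> value = withFallback.get(Settings.EMPTY);  // ["foo,bar"]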
@@ -331,7 +346,8 @@ public class SettingTests extends ESTestCase { } public void testListSettingAcceptsNumberSyntax() { - Setting<List<String>> listSetting = Setting.listSetting("foo.bar", Arrays.asList("foo,bar"), (s) -> s.toString(), true, Setting.Scope.CLUSTER); + Setting<List<String>> listSetting = Setting.listSetting("foo.bar", Arrays.asList("foo,bar"), (s) -> s.toString(), + Property.Dynamic, Property.NodeScope); List<String> input = Arrays.asList("test", "test1, test2", "test", ",,,,"); Settings.Builder builder = Settings.builder().putArray("foo.bar", input.toArray(new String[0])); // try to parse this really annoying format @@ -348,8 +364,8 @@ public class SettingTests extends ESTestCase { assertTrue(listSetting.match("foo.bar." + randomIntBetween(0,10000))); } - public void testPrefixKeySetting() { - Setting<Boolean> setting = Setting.prefixKeySetting("foo.", "false", Boolean::parseBoolean, false, Setting.Scope.CLUSTER); + public void testDynamicKeySetting() { + Setting<Boolean> setting = Setting.prefixKeySetting("foo.", "false", Boolean::parseBoolean, Property.NodeScope); assertTrue(setting.hasComplexMatcher()); assertTrue(setting.match("foo.bar")); assertFalse(setting.match("foo")); @@ -366,7 +382,8 @@ } public void testAdfixKeySetting() { - Setting<Boolean> setting = Setting.adfixKeySetting("foo", "enable", "false", Boolean::parseBoolean, false, Setting.Scope.CLUSTER); + Setting<Boolean> setting = + Setting.adfixKeySetting("foo", "enable", "false", Boolean::parseBoolean, Property.NodeScope); assertTrue(setting.hasComplexMatcher()); assertTrue(setting.match("foo.bar.enable")); assertTrue(setting.match("foo.baz.enable")); @@ -387,7 +404,7 @@ } public void testMinMaxInt() { - Setting<Integer> integerSetting = Setting.intSetting("foo.bar", 1, 0, 10, false, Setting.Scope.CLUSTER); + Setting<Integer> integerSetting = Setting.intSetting("foo.bar", 1, 0, 10, Property.NodeScope); try { integerSetting.get(Settings.builder().put("foo.bar", 11).build()); fail(); @@ -405,4 +422,39 @@ assertEquals(5, integerSetting.get(Settings.builder().put("foo.bar", 5).build()).intValue()); assertEquals(1, integerSetting.get(Settings.EMPTY).intValue()); } + + /** + * Only one single scope can be added to any setting + */ + public void testMutuallyExclusiveScopes() { + // Those should pass + Setting<String> setting = Setting.simpleString("foo.bar", Property.NodeScope); + assertThat(setting.hasNodeScope(), is(true)); + assertThat(setting.hasIndexScope(), is(false)); + setting = Setting.simpleString("foo.bar", Property.IndexScope); + assertThat(setting.hasIndexScope(), is(true)); + assertThat(setting.hasNodeScope(), is(false)); + + // We accept settings with no scope but they will be rejected when we register with SettingsModule.registerSetting + setting = Setting.simpleString("foo.bar"); + assertThat(setting.hasIndexScope(), is(false)); + assertThat(setting.hasNodeScope(), is(false)); + + // We accept settings with multiple scopes but they will be rejected when we register with SettingsModule.registerSetting + setting = Setting.simpleString("foo.bar", Property.IndexScope, Property.NodeScope); + assertThat(setting.hasIndexScope(), is(true)); + assertThat(setting.hasNodeScope(), is(true)); + } + + /** + * We can't have Null properties + */ + public void testRejectNullProperties() { + try { + Setting.simpleString("foo.bar", (Property[]) null); + fail(); + } catch (IllegalArgumentException ex) { + assertThat(ex.getMessage(), containsString("properties can not be null for setting")); + } + } } diff --git a/core/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java 
b/core/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java index 4f790c2d3a9..bc6afda9a01 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java @@ -20,6 +20,10 @@ package org.elasticsearch.common.settings; import org.elasticsearch.common.inject.ModuleTestCase; +import org.elasticsearch.common.settings.Setting.Property; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; public class SettingsModuleTests extends ModuleTestCase { @@ -45,13 +49,13 @@ public class SettingsModuleTests extends ModuleTestCase { { Settings settings = Settings.builder().put("some.custom.setting", "2.0").build(); SettingsModule module = new SettingsModule(settings); - module.registerSetting(Setting.floatSetting("some.custom.setting", 1.0f, false, Setting.Scope.CLUSTER)); + module.registerSetting(Setting.floatSetting("some.custom.setting", 1.0f, Property.NodeScope)); assertInstanceBinding(module, Settings.class, (s) -> s == settings); } { Settings settings = Settings.builder().put("some.custom.setting", "false").build(); SettingsModule module = new SettingsModule(settings); - module.registerSetting(Setting.floatSetting("some.custom.setting", 1.0f, false, Setting.Scope.CLUSTER)); + module.registerSetting(Setting.floatSetting("some.custom.setting", 1.0f, Property.NodeScope)); try { assertInstanceBinding(module, Settings.class, (s) -> s == settings); fail(); @@ -131,12 +135,11 @@ public class SettingsModuleTests extends ModuleTestCase { public void testRegisterSettingsFilter() { Settings settings = Settings.builder().put("foo.bar", "false").put("bar.foo", false).put("bar.baz", false).build(); SettingsModule module = new SettingsModule(settings); - module.registerSetting(Setting.boolSetting("foo.bar", true, false, Setting.Scope.CLUSTER)); - module.registerSetting(Setting.boolSetting("bar.foo", true, false, Setting.Scope.CLUSTER)); - module.registerSetting(Setting.boolSetting("bar.baz", true, false, Setting.Scope.CLUSTER)); + module.registerSetting(Setting.boolSetting("foo.bar", true, Property.NodeScope)); + module.registerSetting(Setting.boolSetting("bar.foo", true, Property.NodeScope, Property.Filtered)); + module.registerSetting(Setting.boolSetting("bar.baz", true, Property.NodeScope)); module.registerSettingsFilter("foo.*"); - module.registerSettingsFilterIfMissing("bar.foo"); try { module.registerSettingsFilter("bar.foo"); fail(); @@ -149,4 +152,24 @@ public class SettingsModuleTests extends ModuleTestCase { assertInstanceBinding(module, SettingsFilter.class, (s) -> s.filter(settings).getAsMap().get("bar.baz").equals("false")); } + + public void testMutuallyExclusiveScopes() { + new SettingsModule(Settings.EMPTY).registerSetting(Setting.simpleString("foo.bar", Property.NodeScope)); + new SettingsModule(Settings.EMPTY).registerSetting(Setting.simpleString("foo.bar", Property.IndexScope)); + + // Those should fail + try { + new SettingsModule(Settings.EMPTY).registerSetting(Setting.simpleString("foo.bar")); + fail("No scope should fail"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("No scope found for setting")); + } + // Those should fail + try { + new SettingsModule(Settings.EMPTY).registerSetting(Setting.simpleString("foo.bar", Property.IndexScope, Property.NodeScope)); + fail("Multiple scopes should fail"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), 
containsString("More than one scope has been added to the setting")); + } + } } diff --git a/core/src/test/java/org/elasticsearch/consistencylevel/WriteConsistencyLevelIT.java b/core/src/test/java/org/elasticsearch/consistencylevel/WriteConsistencyLevelIT.java index b696c445f30..46c027cb91c 100644 --- a/core/src/test/java/org/elasticsearch/consistencylevel/WriteConsistencyLevelIT.java +++ b/core/src/test/java/org/elasticsearch/consistencylevel/WriteConsistencyLevelIT.java @@ -40,7 +40,7 @@ public class WriteConsistencyLevelIT extends ESIntegTestCase { prepareCreate("test", 1, Settings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 2)).execute().actionGet(); ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForActiveShards(1).setWaitForYellowStatus().execute().actionGet(); - logger.info("Done Cluster Health, status " + clusterHealth.getStatus()); + logger.info("Done Cluster Health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW)); @@ -60,7 +60,7 @@ public class WriteConsistencyLevelIT extends ESIntegTestCase { allowNodes("test", 2); clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForActiveShards(2).setWaitForYellowStatus().execute().actionGet(); - logger.info("Done Cluster Health, status " + clusterHealth.getStatus()); + logger.info("Done Cluster Health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW)); @@ -82,7 +82,7 @@ public class WriteConsistencyLevelIT extends ESIntegTestCase { allowNodes("test", 3); clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForActiveShards(3).setWaitForGreenStatus().execute().actionGet(); - logger.info("Done Cluster Health, status " + clusterHealth.getStatus()); + logger.info("Done Cluster Health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java index 3948a4bab90..29997aec8f6 100644 --- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java @@ -208,7 +208,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { // Figure out what is the elected master node final String masterNode = internalCluster().getMasterName(); - logger.info("---> legit elected master node=" + masterNode); + logger.info("---> legit elected master node={}", masterNode); // Pick a node that isn't the elected master. 
Set nonMasters = new HashSet<>(nodes); @@ -496,7 +496,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { } int docsPerIndexer = randomInt(3); - logger.info("indexing " + docsPerIndexer + " docs per indexer before partition"); + logger.info("indexing {} docs per indexer before partition", docsPerIndexer); countDownLatchRef.set(new CountDownLatch(docsPerIndexer * indexers.size())); for (Semaphore semaphore : semaphores) { semaphore.release(docsPerIndexer); @@ -508,7 +508,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { disruptionScheme.startDisrupting(); docsPerIndexer = 1 + randomInt(5); - logger.info("indexing " + docsPerIndexer + " docs per indexer during partition"); + logger.info("indexing {} docs per indexer during partition", docsPerIndexer); countDownLatchRef.set(new CountDownLatch(docsPerIndexer * indexers.size())); Collections.shuffle(semaphores, random()); for (Semaphore semaphore : semaphores) { @@ -539,11 +539,11 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { } } finally { if (exceptedExceptions.size() > 0) { - StringBuilder sb = new StringBuilder("Indexing exceptions during disruption:"); + StringBuilder sb = new StringBuilder(); for (Exception e : exceptedExceptions) { sb.append("\n").append(e.getMessage()); } - logger.debug(sb.toString()); + logger.debug("Indexing exceptions during disruption: {}", sb); } logger.info("shutting down indexers"); stop.set(true); @@ -731,7 +731,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { IndexResponse indexResponse = internalCluster().client(notIsolatedNode).prepareIndex("test", "type").setSource("field", "value").get(); assertThat(indexResponse.getVersion(), equalTo(1L)); - logger.info("Verifying if document exists via node[" + notIsolatedNode + "]"); + logger.info("Verifying if document exists via node[{}]", notIsolatedNode); GetResponse getResponse = internalCluster().client(notIsolatedNode).prepareGet("test", "type", indexResponse.getId()) .setPreference("_local") .get(); @@ -745,7 +745,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { ensureGreen("test"); for (String node : nodes) { - logger.info("Verifying if document exists after isolating node[" + isolatedNode + "] via node[" + node + "]"); + logger.info("Verifying if document exists after isolating node[{}] via node[{}]", isolatedNode, node); getResponse = internalCluster().client(node).prepareGet("test", "type", indexResponse.getId()) .setPreference("_local") .get(); @@ -764,7 +764,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { List nodes = startCluster(4, -1, new int[]{0}); // Figure out what is the elected master node final String masterNode = internalCluster().getMasterName(); - logger.info("---> legit elected master node=" + masterNode); + logger.info("---> legit elected master node={}", masterNode); List otherNodes = new ArrayList<>(nodes); otherNodes.remove(masterNode); otherNodes.remove(nodes.get(0)); // <-- Don't isolate the node that is in the unicast endpoint for all the other nodes. 
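
A note on the recurring pattern in the logging hunks above and below: string concatenation inside log calls is replaced with parameterized messages, so the argument is only formatted when the line is actually emitted at the active log level. A minimal before/after sketch, not part of the patch itself; the `logger`, `clusterHealth`, and node-id variables are assumed from the surrounding tests:

    // Before: the message string is built eagerly, even when INFO is disabled.
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());

    // After: the {} placeholder defers formatting to the logging framework;
    // multiple placeholders map to the trailing arguments in order.
    logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
    logger.info("--> started nodes: {} and {}", server1NodeId, server2NodeId);
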
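A second pattern runs through the settings hunks in this patch: the trailing `boolean dynamic` flag plus the `Setting.Scope` enum is replaced by `Setting.Property` varargs, with scope validation deferred to registration time. The sketch below is assembled only from signatures that appear in these hunks; it is illustrative code that would live inside a test method, with `Setting`, `Setting.Property`, `Settings`, and `SettingsModule` imported from `org.elasticsearch.common.settings`:

    // Before: Setting.boolSetting("foo.bar", false, true, Setting.Scope.INDEX);
    // After: properties combine freely as varargs.
    Setting<Boolean> dynamicIndex = Setting.boolSetting("foo.bar", false, Property.Dynamic, Property.IndexScope);
    Setting<Boolean> filteredNode = Setting.boolSetting("bar.foo", true, Property.NodeScope, Property.Filtered);

    // A Setting can be constructed with no scope or with several (see
    // testMutuallyExclusiveScopes above), but registration accepts exactly one:
    new SettingsModule(Settings.EMPTY).registerSetting(dynamicIndex); // ok
    // registerSetting(Setting.simpleString("foo.bar"))
    //     -> IllegalArgumentException: "No scope found for setting"
    // registerSetting(Setting.simpleString("foo.bar", Property.IndexScope, Property.NodeScope))
    //     -> IllegalArgumentException: "More than one scope has been added to the setting"
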
diff --git a/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java b/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java index 00c549ef2f1..dfd8ba51a54 100644 --- a/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java @@ -225,7 +225,7 @@ public class MetaDataStateFormatTests extends ESTestCase { msg.append(" after: [").append(checksumAfterCorruption).append("]"); msg.append(" checksum value after corruption: ").append(actualChecksumAfterCorruption).append("]"); msg.append(" file: ").append(fileToCorrupt.getFileName().toString()).append(" length: ").append(dir.fileLength(fileToCorrupt.getFileName().toString())); - logger.debug(msg.toString()); + logger.debug("{}", msg.toString()); assumeTrue("Checksum collision - " + msg.toString(), checksumAfterCorruption != checksumBeforeCorruption // collision || actualChecksumAfterCorruption != checksumBeforeCorruption); // checksum corrupted diff --git a/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java b/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java index 399ef9badab..a1d16bfd884 100644 --- a/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java +++ b/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java @@ -82,7 +82,7 @@ public class QuorumGatewayIT extends ESIntegTestCase { assertTrue(awaitBusy(() -> { logger.info("--> running cluster_health (wait for the shards to startup)"); ClusterHealthResponse clusterHealth = activeClient.admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForNodes("2").waitForActiveShards(test.numPrimaries * 2)).actionGet(); - logger.info("--> done cluster_health, status " + clusterHealth.getStatus()); + logger.info("--> done cluster_health, status {}", clusterHealth.getStatus()); return (!clusterHealth.isTimedOut()) && clusterHealth.getStatus() == ClusterHealthStatus.YELLOW; }, 30, TimeUnit.SECONDS)); logger.info("--> one node is closed -- index 1 document into the remaining nodes"); diff --git a/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java b/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java index 8fd6e303220..4da9c2df177 100644 --- a/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java +++ b/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java @@ -380,7 +380,7 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase { assertSyncIdsNotNull(); } - logger.info("--> disabling allocation while the cluster is shut down", useSyncIds ? "" : " a second time"); + logger.info("--> disabling allocation while the cluster is shut down{}", useSyncIds ? "" : " a second time"); // Disable allocations while we are closing nodes client().admin().cluster().prepareUpdateSettings() .setTransientSettings(settingsBuilder() diff --git a/core/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java b/core/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java index 6f188ef4280..d28e5333225 100644 --- a/core/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java +++ b/core/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java @@ -100,7 +100,7 @@ public class ReusePeerRecoverySharedTest { assertSyncIdsNotNull(); } - logger.info("--> disabling allocation while the cluster is shut down", useSyncIds ? 
"" : " a second time"); + logger.info("--> disabling allocation while the cluster is shut down{}", useSyncIds ? "" : " a second time"); // Disable allocations while we are closing nodes client().admin().cluster().prepareUpdateSettings().setTransientSettings( settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), EnableAllocationDecider.Allocation.NONE)) diff --git a/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java index 057186d597f..a02c3df00ac 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ b/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -34,6 +34,7 @@ import org.elasticsearch.cache.recycler.PageCacheRecycler; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.env.Environment; @@ -194,9 +195,9 @@ public class IndexModuleTests extends ESTestCase { public void testListener() throws IOException { - Setting booleanSetting = Setting.boolSetting("foo.bar", false, true, Setting.Scope.INDEX); + Setting booleanSetting = Setting.boolSetting("foo.bar", false, Property.Dynamic, Property.IndexScope); IndexModule module = new IndexModule(IndexSettingsModule.newIndexSettings(index, settings, booleanSetting), null, new AnalysisRegistry(null, environment)); - Setting booleanSetting2 = Setting.boolSetting("foo.bar.baz", false, true, Setting.Scope.INDEX); + Setting booleanSetting2 = Setting.boolSetting("foo.bar.baz", false, Property.Dynamic, Property.IndexScope); AtomicBoolean atomicBoolean = new AtomicBoolean(false); module.addSettingsUpdateConsumer(booleanSetting, atomicBoolean::set); diff --git a/core/src/test/java/org/elasticsearch/index/IndexSettingsTests.java b/core/src/test/java/org/elasticsearch/index/IndexSettingsTests.java index 677c8358fb0..46d99e3b4bc 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexSettingsTests.java +++ b/core/src/test/java/org/elasticsearch/index/IndexSettingsTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; @@ -44,7 +45,8 @@ public class IndexSettingsTests extends ESTestCase { Version version = VersionUtils.getPreviousVersion(); Settings theSettings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).put(IndexMetaData.SETTING_INDEX_UUID, "0xdeadbeef").build(); final AtomicInteger integer = new AtomicInteger(0); - Setting integerSetting = Setting.intSetting("index.test.setting.int", -1, true, Setting.Scope.INDEX); + Setting integerSetting = Setting.intSetting("index.test.setting.int", -1, + Property.Dynamic, Property.IndexScope); IndexMetaData metaData = newIndexMeta("index", theSettings); IndexSettings settings = newIndexSettings(newIndexMeta("index", theSettings), Settings.EMPTY, integerSetting); settings.getScopedSettings().addSettingsUpdateConsumer(integerSetting, integer::set); @@ 
-65,8 +67,10 @@ public class IndexSettingsTests extends ESTestCase { .put(IndexMetaData.SETTING_INDEX_UUID, "0xdeadbeef").build(); final AtomicInteger integer = new AtomicInteger(0); final StringBuilder builder = new StringBuilder(); - Setting integerSetting = Setting.intSetting("index.test.setting.int", -1, true, Setting.Scope.INDEX); - Setting notUpdated = new Setting<>("index.not.updated", "", Function.identity(), true, Setting.Scope.INDEX); + Setting integerSetting = Setting.intSetting("index.test.setting.int", -1, + Property.Dynamic, Property.IndexScope); + Setting notUpdated = new Setting<>("index.not.updated", "", Function.identity(), + Property.Dynamic, Property.IndexScope); IndexSettings settings = newIndexSettings(newIndexMeta("index", theSettings), Settings.EMPTY, integerSetting, notUpdated); settings.getScopedSettings().addSettingsUpdateConsumer(integerSetting, integer::set); @@ -128,7 +132,7 @@ public class IndexSettingsTests extends ESTestCase { Settings nodeSettings = Settings.settingsBuilder().put("index.foo.bar", 43).build(); final AtomicInteger indexValue = new AtomicInteger(0); - Setting integerSetting = Setting.intSetting("index.foo.bar", -1, true, Setting.Scope.INDEX); + Setting integerSetting = Setting.intSetting("index.foo.bar", -1, Property.Dynamic, Property.IndexScope); IndexSettings settings = newIndexSettings(newIndexMeta("index", theSettings), nodeSettings, integerSetting); settings.getScopedSettings().addSettingsUpdateConsumer(integerSetting, indexValue::set); assertEquals(numReplicas, settings.getNumberOfReplicas()); diff --git a/core/src/test/java/org/elasticsearch/index/SettingsListenerIT.java b/core/src/test/java/org/elasticsearch/index/SettingsListenerIT.java index 7dbff244fcc..e9e8dcfc007 100644 --- a/core/src/test/java/org/elasticsearch/index/SettingsListenerIT.java +++ b/core/src/test/java/org/elasticsearch/index/SettingsListenerIT.java @@ -21,6 +21,7 @@ package org.elasticsearch.index; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.plugins.Plugin; @@ -43,7 +44,8 @@ public class SettingsListenerIT extends ESIntegTestCase { public static class SettingsListenerPlugin extends Plugin { private final SettingsTestingService service = new SettingsTestingService(); - private static final Setting SETTING = Setting.intSetting("index.test.new.setting", 0, true, Setting.Scope.INDEX); + private static final Setting SETTING = Setting.intSetting("index.test.new.setting", 0, + Property.Dynamic, Property.IndexScope); /** * The name of the plugin. 
*/ @@ -93,7 +95,8 @@ public class SettingsListenerIT extends ESIntegTestCase { public static class SettingsTestingService { public volatile int value; - public static Setting VALUE = Setting.intSetting("index.test.new.setting", -1, -1, true, Setting.Scope.INDEX); + public static Setting VALUE = Setting.intSetting("index.test.new.setting", -1, -1, + Property.Dynamic, Property.IndexScope); public void setValue(int value) { this.value = value; diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/FilterFieldDataTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/FilterFieldDataTests.java index 49cb414208d..3d2b77246a8 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/FilterFieldDataTests.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/FilterFieldDataTests.java @@ -145,7 +145,7 @@ public class FilterFieldDataTests extends AbstractFieldDataTestCase { } writer.addDocument(d); } - logger.debug(hundred + " " + ten + " " + five); + logger.debug("{} {} {}", hundred, ten, five); writer.forceMerge(1, true); LeafReaderContext context = refreshReader(); String[] formats = new String[] { "paged_bytes"}; diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/fieldcomparator/ReplaceMissingTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/fieldcomparator/ReplaceMissingTests.java index 63b66f47d1a..a291311c3bc 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/fieldcomparator/ReplaceMissingTests.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/fieldcomparator/ReplaceMissingTests.java @@ -52,7 +52,7 @@ public class ReplaceMissingTests extends ESTestCase { iw.close(); DirectoryReader reader = DirectoryReader.open(dir); - LeafReader ar = getOnlySegmentReader(reader); + LeafReader ar = getOnlyLeafReader(reader); SortedDocValues raw = ar.getSortedDocValues("field"); assertEquals(2, raw.getValueCount()); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java b/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java index 966edf82621..b7194a3829b 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java @@ -130,12 +130,6 @@ public abstract class FieldTypeTestCase extends ESTestCase { other.setSimilarity(new BM25SimilarityProvider("bar", Settings.EMPTY)); } }, - new Modifier("norms.loading", true) { - @Override - public void modify(MappedFieldType ft) { - ft.setNormsLoading(MappedFieldType.Loading.LAZY); - } - }, new Modifier("fielddata", true) { @Override public void modify(MappedFieldType ft) { @@ -217,7 +211,6 @@ public abstract class FieldTypeTestCase extends ESTestCase { ", searchAnalyzer=" + ft.searchAnalyzer() + ", searchQuoteAnalyzer=" + ft.searchQuoteAnalyzer() + ", similarity=" + ft.similarity() + - ", normsLoading=" + ft.normsLoading() + ", fieldDataType=" + ft.fieldDataType() + ", nullValue=" + ft.nullValue() + ", nullValueAsString='" + ft.nullValueAsString() + "'" + diff --git a/core/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java index 191ce5d477e..501c538b870 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java @@ -223,7 +223,7 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase { } public 
void testRandom() throws Exception { - boolean omitNorms = false; + boolean norms = true; boolean stored = false; boolean enabled = true; boolean tv_stored = false; @@ -239,7 +239,7 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase { allDefault = false; mappingBuilder.startObject("_all"); if (randomBoolean()) { - booleanOptionList.add(new Tuple<>("omit_norms", omitNorms = randomBoolean())); + booleanOptionList.add(new Tuple<>("norms", norms = randomBoolean())); } if (randomBoolean()) { booleanOptionList.add(new Tuple<>("store", stored = randomBoolean())); @@ -272,7 +272,7 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase { DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); String mapping = mappingBuilder.endObject().endObject().bytes().toUtf8(); - logger.info(mapping); + logger.info("Mapping: {}", mapping); DocumentMapper docMapper = parser.parse("test", new CompressedXContent(mapping)); String builtMapping = docMapper.mappingSource().string(); // reparse it @@ -285,7 +285,7 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase { Document doc = builtDocMapper.parse("test", "test", "1", new BytesArray(json)).rootDoc(); AllField field = (AllField) doc.getField("_all"); if (enabled) { - assertThat(field.fieldType().omitNorms(), equalTo(omitNorms)); + assertThat(field.fieldType().omitNorms(), equalTo(!norms)); assertThat(field.fieldType().stored(), equalTo(stored)); assertThat(field.fieldType().storeTermVectorOffsets(), equalTo(tv_offsets)); assertThat(field.fieldType().storeTermVectorPayloads(), equalTo(tv_payloads)); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/boost/CustomBoostMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/boost/CustomBoostMappingTests.java index 91a0ca15cd4..490477d67e7 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/boost/CustomBoostMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/boost/CustomBoostMappingTests.java @@ -85,18 +85,17 @@ public class CustomBoostMappingTests extends ESSingleNodeTestCase { } public void testBackCompatFieldMappingBoostValues() throws Exception { - String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties") - .startObject("s_field").field("type", "keyword").field("boost", 2.0f).endObject() - .startObject("l_field").field("type", "long").field("boost", 3.0f).startObject("norms").field("enabled", true).endObject().endObject() - .startObject("i_field").field("type", "integer").field("boost", 4.0f).startObject("norms").field("enabled", true).endObject().endObject() - .startObject("sh_field").field("type", "short").field("boost", 5.0f).startObject("norms").field("enabled", true).endObject().endObject() - .startObject("b_field").field("type", "byte").field("boost", 6.0f).startObject("norms").field("enabled", true).endObject().endObject() - .startObject("d_field").field("type", "double").field("boost", 7.0f).startObject("norms").field("enabled", true).endObject().endObject() - .startObject("f_field").field("type", "float").field("boost", 8.0f).startObject("norms").field("enabled", true).endObject().endObject() - .startObject("date_field").field("type", "date").field("boost", 9.0f).startObject("norms").field("enabled", true).endObject().endObject() - .endObject().endObject().endObject().string(); - { + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties") + .startObject("s_field").field("type", 
"keyword").field("boost", 2.0f).endObject() + .startObject("l_field").field("type", "long").field("boost", 3.0f).endObject() + .startObject("i_field").field("type", "integer").field("boost", 4.0f).endObject() + .startObject("sh_field").field("type", "short").field("boost", 5.0f).endObject() + .startObject("b_field").field("type", "byte").field("boost", 6.0f).endObject() + .startObject("d_field").field("type", "double").field("boost", 7.0f).endObject() + .startObject("f_field").field("type", "float").field("boost", 8.0f).endObject() + .startObject("date_field").field("type", "date").field("boost", 9.0f).endObject() + .endObject().endObject().endObject().string(); IndexService indexService = createIndex("test", BW_SETTINGS); QueryShardContext context = indexService.newQueryShardContext(); DocumentMapper mapper = indexService.mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); @@ -122,16 +121,34 @@ public class CustomBoostMappingTests extends ESSingleNodeTestCase { .endObject().bytes()); assertThat(doc.rootDoc().getField("s_field").boost(), equalTo(2.0f)); + assertThat(doc.rootDoc().getField("s_field").fieldType().omitNorms(), equalTo(false)); assertThat(doc.rootDoc().getField("l_field").boost(), equalTo(3.0f)); + assertThat(doc.rootDoc().getField("l_field").fieldType().omitNorms(), equalTo(false)); assertThat(doc.rootDoc().getField("i_field").boost(), equalTo(4.0f)); + assertThat(doc.rootDoc().getField("i_field").fieldType().omitNorms(), equalTo(false)); assertThat(doc.rootDoc().getField("sh_field").boost(), equalTo(5.0f)); + assertThat(doc.rootDoc().getField("sh_field").fieldType().omitNorms(), equalTo(false)); assertThat(doc.rootDoc().getField("b_field").boost(), equalTo(6.0f)); + assertThat(doc.rootDoc().getField("b_field").fieldType().omitNorms(), equalTo(false)); assertThat(doc.rootDoc().getField("d_field").boost(), equalTo(7.0f)); + assertThat(doc.rootDoc().getField("d_field").fieldType().omitNorms(), equalTo(false)); assertThat(doc.rootDoc().getField("f_field").boost(), equalTo(8.0f)); + assertThat(doc.rootDoc().getField("f_field").fieldType().omitNorms(), equalTo(false)); assertThat(doc.rootDoc().getField("date_field").boost(), equalTo(9.0f)); + assertThat(doc.rootDoc().getField("date_field").fieldType().omitNorms(), equalTo(false)); } { + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties") + .startObject("s_field").field("type", "keyword").field("boost", 2.0f).endObject() + .startObject("l_field").field("type", "long").field("boost", 3.0f).endObject() + .startObject("i_field").field("type", "integer").field("boost", 4.0f).endObject() + .startObject("sh_field").field("type", "short").field("boost", 5.0f).endObject() + .startObject("b_field").field("type", "byte").field("boost", 6.0f).endObject() + .startObject("d_field").field("type", "double").field("boost", 7.0f).endObject() + .startObject("f_field").field("type", "float").field("boost", 8.0f).endObject() + .startObject("date_field").field("type", "date").field("boost", 9.0f).endObject() + .endObject().endObject().endObject().string(); IndexService indexService = createIndex("text"); QueryShardContext context = indexService.newQueryShardContext(); DocumentMapper mapper = indexService.mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); @@ -157,13 +174,21 @@ public class CustomBoostMappingTests extends ESSingleNodeTestCase { .endObject().bytes()); assertThat(doc.rootDoc().getField("s_field").boost(), 
equalTo(1f)); + assertThat(doc.rootDoc().getField("s_field").fieldType().omitNorms(), equalTo(true)); assertThat(doc.rootDoc().getField("l_field").boost(), equalTo(1f)); + assertThat(doc.rootDoc().getField("l_field").fieldType().omitNorms(), equalTo(true)); assertThat(doc.rootDoc().getField("i_field").boost(), equalTo(1f)); + assertThat(doc.rootDoc().getField("i_field").fieldType().omitNorms(), equalTo(true)); assertThat(doc.rootDoc().getField("sh_field").boost(), equalTo(1f)); + assertThat(doc.rootDoc().getField("sh_field").fieldType().omitNorms(), equalTo(true)); assertThat(doc.rootDoc().getField("b_field").boost(), equalTo(1f)); + assertThat(doc.rootDoc().getField("b_field").fieldType().omitNorms(), equalTo(true)); assertThat(doc.rootDoc().getField("d_field").boost(), equalTo(1f)); + assertThat(doc.rootDoc().getField("d_field").fieldType().omitNorms(), equalTo(true)); assertThat(doc.rootDoc().getField("f_field").boost(), equalTo(1f)); + assertThat(doc.rootDoc().getField("f_field").fieldType().omitNorms(), equalTo(true)); assertThat(doc.rootDoc().getField("date_field").boost(), equalTo(1f)); + assertThat(doc.rootDoc().getField("date_field").fieldType().omitNorms(), equalTo(true)); } } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/boost/FieldLevelBoostTests.java b/core/src/test/java/org/elasticsearch/index/mapper/boost/FieldLevelBoostTests.java index b1fde6bdd67..90121e66ea8 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/boost/FieldLevelBoostTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/boost/FieldLevelBoostTests.java @@ -102,13 +102,13 @@ public class FieldLevelBoostTests extends ESSingleNodeTestCase { public void testBackCompatFieldLevelMappingBoost() throws Exception { String mapping = XContentFactory.jsonBuilder().startObject().startObject("person").startObject("properties") .startObject("str_field").field("type", "keyword").field("boost", "2.0").endObject() - .startObject("int_field").field("type", "integer").field("boost", "3.0").startObject("norms").field("enabled", true).endObject().endObject() - .startObject("byte_field").field("type", "byte").field("boost", "4.0").startObject("norms").field("enabled", true).endObject().endObject() - .startObject("date_field").field("type", "date").field("boost", "5.0").startObject("norms").field("enabled", true).endObject().endObject() - .startObject("double_field").field("type", "double").field("boost", "6.0").startObject("norms").field("enabled", true).endObject().endObject() - .startObject("float_field").field("type", "float").field("boost", "7.0").startObject("norms").field("enabled", true).endObject().endObject() - .startObject("long_field").field("type", "long").field("boost", "8.0").startObject("norms").field("enabled", true).endObject().endObject() - .startObject("short_field").field("type", "short").field("boost", "9.0").startObject("norms").field("enabled", true).endObject().endObject() + .startObject("int_field").field("type", "integer").field("boost", "3.0").endObject() + .startObject("byte_field").field("type", "byte").field("boost", "4.0").endObject() + .startObject("date_field").field("type", "date").field("boost", "5.0").endObject() + .startObject("double_field").field("type", "double").field("boost", "6.0").endObject() + .startObject("float_field").field("type", "float").field("boost", "7.0").endObject() + .startObject("long_field").field("type", "long").field("boost", "8.0").endObject() + .startObject("short_field").field("type", "short").field("boost", "9.0").endObject() 
.string(); { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/KeywordFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/KeywordFieldMapperTests.java index 8af92f266a5..28867ed1f73 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/core/KeywordFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/KeywordFieldMapperTests.java @@ -24,22 +24,33 @@ import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.IndexableFieldType; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; import org.junit.Before; import java.io.IOException; import java.util.Arrays; +import java.util.Collection; import static org.hamcrest.Matchers.equalTo; public class KeywordFieldMapperTests extends ESSingleNodeTestCase { + @Override + protected Collection> getPlugins() { + return pluginList(InternalSettingsPlugin.class); + } + IndexService indexService; DocumentMapperParser parser; @@ -232,4 +243,51 @@ public class KeywordFieldMapperTests extends ESSingleNodeTestCase { assertEquals("The [keyword] field does not support positions, got [index_options]=" + indexOptions, e.getMessage()); } } + + public void testBoost() throws IOException { + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "keyword").field("boost", 2f).endObject().endObject() + .endObject().endObject().string(); + + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + + assertEquals(mapping, mapper.mappingSource().toString()); + } + + public void testBoostImplicitlyEnablesNormsOnOldIndex() throws IOException { + indexService = createIndex("test2", + Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_3_0).build()); + parser = indexService.mapperService().documentMapperParser(); + + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "keyword").field("boost", 2f).endObject().endObject() + .endObject().endObject().string(); + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + + String expectedMapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "keyword") + .field("boost", 2f).field("norms", true).endObject().endObject() + .endObject().endObject().string(); + assertEquals(expectedMapping, mapper.mappingSource().toString()); + } + + public void testEnableNorms() throws IOException { + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "keyword").field("norms", true).endObject().endObject() + .endObject().endObject().string(); + + DocumentMapper mapper = 
parser.parse("type", new CompressedXContent(mapping)); + + assertEquals(mapping, mapper.mappingSource().toString()); + + ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + .startObject() + .field("field", "1234") + .endObject() + .bytes()); + + IndexableField[] fields = doc.rootDoc().getFields("field"); + assertEquals(2, fields.length); + assertFalse(fields[0].fieldType().omitNorms()); + } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/StringMappingUpgradeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/StringMappingUpgradeTests.java index 4b2fe9a7102..d49f50da0ab 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/core/StringMappingUpgradeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/StringMappingUpgradeTests.java @@ -39,6 +39,7 @@ import org.elasticsearch.test.InternalSettingsPlugin; import java.io.IOException; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.instanceOf; @@ -130,6 +131,7 @@ public class StringMappingUpgradeTests extends ESSingleNodeTestCase { XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("field").field("type", "string"); boolean keyword = randomBoolean(); + boolean hasNorms = keyword == false; boolean shouldUpgrade = true; if (keyword) { mapping.field("index", randomBoolean() ? "not_analyzed" : "no"); @@ -143,7 +145,12 @@ public class StringMappingUpgradeTests extends ESSingleNodeTestCase { mapping.field("doc_values", randomBoolean()); } if (randomBoolean()) { - mapping.field("omit_norms", randomBoolean()); + hasNorms = randomBoolean(); + if (randomBoolean()) { + mapping.field("omit_norms", hasNorms == false); + } else { + mapping.field("norms", Collections.singletonMap("enabled", hasNorms)); + } } if (randomBoolean()) { mapping.startObject("fields").startObject("raw").field("type", "keyword").endObject().endObject(); @@ -172,6 +179,9 @@ public class StringMappingUpgradeTests extends ESSingleNodeTestCase { } else { assertThat(field, instanceOf(TextFieldMapper.class)); } + if (field.fieldType().indexOptions() != IndexOptions.NONE) { + assertEquals(hasNorms, field.fieldType().omitNorms() == false); + } } } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/TextFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/TextFieldMapperTests.java index 3a9d5b46ab9..8dba6dd3fe3 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/core/TextFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/TextFieldMapperTests.java @@ -132,9 +132,7 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("field") .field("type", "text") - .startObject("norms") - .field("enabled", false) - .endObject() + .field("norms", false) .endObject().endObject() .endObject().endObject().string(); @@ -386,4 +384,5 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase { assertThat(doc.rootDoc().getField("field6").fieldType().storeTermVectorPositions(), equalTo(true)); assertThat(doc.rootDoc().getField("field6").fieldType().storeTermVectorPayloads(), equalTo(true)); } + } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldsIntegrationIT.java 
b/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldsIntegrationIT.java index 6a82052bfa8..a1f6929fade 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldsIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldsIntegrationIT.java @@ -112,7 +112,7 @@ public class MultiFieldsIntegrationIT extends ESIntegTestCase { assertThat(mappingMetaData, not(nullValue())); Map mappingSource = mappingMetaData.sourceAsMap(); Map aField = ((Map) XContentMapValues.extractValue("properties.a", mappingSource)); - logger.info("Keys: " + aField.keySet()); + logger.info("Keys: {}", aField.keySet()); assertThat(aField.size(), equalTo(2)); assertThat(aField.get("type").toString(), equalTo("geo_point")); assertThat(aField.get("fields"), notNullValue()); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/numeric/SimpleNumericTests.java b/core/src/test/java/org/elasticsearch/index/mapper/numeric/SimpleNumericTests.java index 09804f82919..c10ccd14262 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/numeric/SimpleNumericTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/numeric/SimpleNumericTests.java @@ -684,4 +684,20 @@ public class SimpleNumericTests extends ESSingleNodeTestCase { parser = createIndex("index2-" + type, oldIndexSettings).mapperService().documentMapperParser(); parser.parse("type", new CompressedXContent(mappingWithTV)); // no exception } + + public void testRejectNorms() throws IOException { + // not supported as of 5.0 + for (String type : Arrays.asList("byte", "short", "integer", "long", "float", "double")) { + DocumentMapperParser parser = createIndex("index-" + type).mapperService().documentMapperParser(); + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties") + .startObject("foo") + .field("type", type) + .field("norms", random().nextBoolean()) + .endObject() + .endObject().endObject().endObject().string(); + MapperParsingException e = expectThrows(MapperParsingException.class, () -> parser.parse("type", new CompressedXContent(mapping))); + assertThat(e.getMessage(), containsString("Mapping definition for [foo] has unsupported parameters: [norms")); + } + } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java index 86c67db219f..8007e624836 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java @@ -564,7 +564,7 @@ public class SimpleStringMappingTests extends ESSingleNodeTestCase { mapperService.merge("type", new CompressedXContent(updatedMapping), MapperService.MergeReason.MAPPING_UPDATE, false); fail(); } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), containsString("different [omit_norms]")); + assertThat(e.getMessage(), containsString("different [norms]")); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingOnClusterIT.java b/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingOnClusterIT.java index a3d6a87c43f..600f84b5f5f 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingOnClusterIT.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingOnClusterIT.java @@ -49,7 +49,7 @@ public class 
UpdateMappingOnClusterIT extends ESIntegTestCase { String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/update/all_mapping_create_index.json"); String mappingUpdate = copyToStringFromClasspath("/org/elasticsearch/index/mapper/update/all_mapping_update_with_conflicts.json"); String[] errorMessage = { - "[_all] has different [omit_norms] values", + "[_all] has different [norms] values", "[_all] has different [store] values", "[_all] has different [store_term_vector] values", "[_all] has different [store_term_vector_offsets] values", diff --git a/core/src/test/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java b/core/src/test/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java index 4853d59588b..0c36a856682 100644 --- a/core/src/test/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java +++ b/core/src/test/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java @@ -77,7 +77,7 @@ public class IndexLifecycleActionIT extends ESIntegTestCase { logger.info("Running Cluster Health"); ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet(); - logger.info("Done Cluster Health, status " + clusterHealth.getStatus()); + logger.info("Done Cluster Health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW)); @@ -92,7 +92,7 @@ public class IndexLifecycleActionIT extends ESIntegTestCase { // first wait for 2 nodes in the cluster logger.info("Running Cluster Health"); clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2")).actionGet(); - logger.info("Done Cluster Health, status " + clusterHealth.getStatus()); + logger.info("Done Cluster Health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); final String node2 = getLocalNodeId(server_2); @@ -171,7 +171,7 @@ public class IndexLifecycleActionIT extends ESIntegTestCase { // verify health logger.info("Running Cluster Health"); clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2")).actionGet(); - logger.info("Done Cluster Health, status " + clusterHealth.getStatus()); + logger.info("Done Cluster Health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java b/core/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java index 58911d86f49..e24b7e3e0b4 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java @@ -47,6 +47,7 @@ import org.elasticsearch.action.suggest.SuggestRequestBuilder; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.index.IndexNotFoundException; @@ -642,9 +643,12 @@ public class 
IndicesOptionsIntegrationIT extends ESIntegTestCase { return "a plugin that adds a dynamic tst setting"; } - private static final Setting INDEX_A = new Setting<>("index.a", "", Function.identity(), true, Setting.Scope.INDEX); - private static final Setting INDEX_C = new Setting<>("index.c", "", Function.identity(), true, Setting.Scope.INDEX); - private static final Setting INDEX_E = new Setting<>("index.e", "", Function.identity(), false, Setting.Scope.INDEX); + private static final Setting INDEX_A = + new Setting<>("index.a", "", Function.identity(), Property.Dynamic, Property.IndexScope); + private static final Setting INDEX_C = + new Setting<>("index.c", "", Function.identity(), Property.Dynamic, Property.IndexScope); + private static final Setting INDEX_E = + new Setting<>("index.e", "", Function.identity(), Property.IndexScope); public void onModule(SettingsModule module) { diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java b/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java index e36f1bca49b..646d9651436 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java @@ -282,7 +282,7 @@ public class IndicesRequestCacheTests extends ESTestCase { assertEquals("foo", value1.toUtf8()); BytesReference value2 = cache.getOrCompute(secondEntity, secondReader, termQuery.buildAsBytes()); assertEquals("bar", value2.toUtf8()); - logger.info(requestCacheStats.stats().getMemorySize().toString()); + logger.info("Memory size: {}", requestCacheStats.stats().getMemorySize()); BytesReference value3 = cache.getOrCompute(thirddEntity, thirdReader, termQuery.buildAsBytes()); assertEquals("baz", value3.toUtf8()); assertEquals(2, cache.count()); @@ -319,7 +319,7 @@ public class IndicesRequestCacheTests extends ESTestCase { assertEquals("foo", value1.toUtf8()); BytesReference value2 = cache.getOrCompute(secondEntity, secondReader, termQuery.buildAsBytes()); assertEquals("bar", value2.toUtf8()); - logger.info(requestCacheStats.stats().getMemorySize().toString()); + logger.info("Memory size: {}", requestCacheStats.stats().getMemorySize()); BytesReference value3 = cache.getOrCompute(thirddEntity, thirdReader, termQuery.buildAsBytes()); assertEquals("baz", value3.toUtf8()); assertEquals(3, cache.count()); diff --git a/core/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java b/core/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java index 0951f3c46df..8e064f46e12 100644 --- a/core/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java @@ -156,15 +156,15 @@ public class UpdateMappingIntegrationIT extends ESIntegTestCase { public void testUpdateMappingWithNormsConflicts() throws Exception { client().admin().indices().prepareCreate("test") - .addMapping("type", "{\"type\":{\"properties\":{\"body\":{\"type\":\"text\", \"norms\": { \"enabled\": false }}}}}") + .addMapping("type", "{\"type\":{\"properties\":{\"body\":{\"type\":\"text\", \"norms\": false }}}}") .execute().actionGet(); try { client().admin().indices().preparePutMapping("test").setType("type") - .setSource("{\"type\":{\"properties\":{\"body\":{\"type\":\"text\", \"norms\": { \"enabled\": true }}}}}").execute() + .setSource("{\"type\":{\"properties\":{\"body\":{\"type\":\"text\", \"norms\": true }}}}").execute() 
.actionGet(); fail("Expected MergeMappingException"); } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), containsString("mapper [body] has different [omit_norms]")); + assertThat(e.getMessage(), containsString("mapper [body] has different [norms]")); } } diff --git a/core/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java b/core/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java index 35ed7a2c657..6bea3217894 100644 --- a/core/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java +++ b/core/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java @@ -31,6 +31,7 @@ import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.client.Requests; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.common.unit.TimeValue; @@ -197,8 +198,10 @@ public class RandomExceptionCircuitBreakerIT extends ESIntegTestCase { // TODO: Generalize this class and add it as a utility public static class RandomExceptionDirectoryReaderWrapper extends MockEngineSupport.DirectoryReaderWrapper { - public static final Setting EXCEPTION_TOP_LEVEL_RATIO_SETTING = Setting.doubleSetting(EXCEPTION_TOP_LEVEL_RATIO_KEY, 0.1d, 0.0d, false, Setting.Scope.INDEX); - public static final Setting EXCEPTION_LOW_LEVEL_RATIO_SETTING = Setting.doubleSetting(EXCEPTION_LOW_LEVEL_RATIO_KEY, 0.1d, 0.0d, false, Setting.Scope.INDEX); + public static final Setting EXCEPTION_TOP_LEVEL_RATIO_SETTING = + Setting.doubleSetting(EXCEPTION_TOP_LEVEL_RATIO_KEY, 0.1d, 0.0d, Property.IndexScope); + public static final Setting EXCEPTION_LOW_LEVEL_RATIO_SETTING = + Setting.doubleSetting(EXCEPTION_LOW_LEVEL_RATIO_KEY, 0.1d, 0.0d, Property.IndexScope); public static class TestPlugin extends Plugin { @Override public String name() { diff --git a/core/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasIT.java b/core/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasIT.java index b257e3bcd5e..1a2f7e4ba18 100644 --- a/core/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasIT.java +++ b/core/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasIT.java @@ -48,7 +48,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase { assertAcked(prepareCreate("test", 2)); logger.info("Running Cluster Health"); ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); - logger.info("Done Cluster Health, status " + clusterHealth.getStatus()); + logger.info("Done Cluster Health, status {}", clusterHealth.getStatus()); NumShards numShards = getNumShards("test"); @@ -75,7 +75,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase { assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put("index.number_of_replicas", 2)).execute().actionGet()); logger.info("Running Cluster Health"); clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().setWaitForActiveShards(numShards.numPrimaries * 2).execute().actionGet(); - logger.info("Done Cluster 
Health, status " + clusterHealth.getStatus());
+        logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
         assertThat(clusterHealth.isTimedOut(), equalTo(false));
         assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
         assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
@@ -88,7 +88,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {
         logger.info("Running Cluster Health");
         clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes(">=3").execute().actionGet();
-        logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+        logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
         assertThat(clusterHealth.isTimedOut(), equalTo(false));
         assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
         assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
@@ -106,7 +106,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {
         logger.info("Running Cluster Health");
         clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes(">=3").execute().actionGet();
-        logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+        logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
         assertThat(clusterHealth.isTimedOut(), equalTo(false));
         assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
         assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
@@ -128,7 +128,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {
         logger.info("--> running cluster health");
         ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(numShards.numPrimaries * 2).execute().actionGet();
-        logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+        logger.info("--> done cluster health, status {}", clusterHealth.getStatus());
         assertThat(clusterHealth.isTimedOut(), equalTo(false));
         assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
         assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
@@ -140,7 +140,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {
         logger.info("--> running cluster health");
         clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(numShards.numPrimaries * 3).setWaitForNodes(">=3").execute().actionGet();
-        logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+        logger.info("--> done cluster health, status {}", clusterHealth.getStatus());
         assertThat(clusterHealth.isTimedOut(), equalTo(false));
         assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
         assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
@@ -153,7 +153,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {
         logger.info("--> running cluster health");
         clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(numShards.numPrimaries * 2).setWaitForNodes(">=2").execute().actionGet();
-        logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+        logger.info("--> done cluster health, status {}", clusterHealth.getStatus());
         assertThat(clusterHealth.isTimedOut(), equalTo(false));
         assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
         assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
@@ -166,7 +166,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {
         logger.info("--> running cluster health");
         clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes(">=1").setWaitForActiveShards(numShards.numPrimaries).execute().actionGet();
-        logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+        logger.info("--> done cluster health, status {}", clusterHealth.getStatus());
         assertThat(clusterHealth.isTimedOut(), equalTo(false));
         assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
         assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
@@ -183,7 +183,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {
         logger.info("--> running cluster health");
         ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(numShards.numPrimaries * 2).execute().actionGet();
-        logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+        logger.info("--> done cluster health, status {}", clusterHealth.getStatus());
         assertThat(clusterHealth.isTimedOut(), equalTo(false));
         assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
         assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
@@ -195,7 +195,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {
         logger.info("--> running cluster health");
         clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(numShards.numPrimaries * 3).execute().actionGet();
-        logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+        logger.info("--> done cluster health, status {}", clusterHealth.getStatus());
         assertThat(clusterHealth.isTimedOut(), equalTo(false));
         assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
         assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
@@ -208,7 +208,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {
         logger.info("--> running cluster health");
         clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes(">=2").setWaitForActiveShards(numShards.numPrimaries * 2).execute().actionGet();
-        logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+        logger.info("--> done cluster health, status {}", clusterHealth.getStatus());
         assertThat(clusterHealth.isTimedOut(), equalTo(false));
         assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
         assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
@@ -221,7 +221,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {
         logger.info("--> running cluster health");
         clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().setWaitForNodes(">=1").setWaitForActiveShards(numShards.numPrimaries).execute().actionGet();
-        logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+        logger.info("--> done cluster health, status {}", clusterHealth.getStatus());
         assertThat(clusterHealth.isTimedOut(), equalTo(false));
         assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
         assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
@@ -237,7 +237,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {
         logger.info("--> running cluster health");
         ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(numShards.numPrimaries * 3).execute().actionGet();
-        logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+        logger.info("--> done cluster health, status {}", clusterHealth.getStatus());
         assertThat(clusterHealth.isTimedOut(), equalTo(false));
         assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
         assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
@@ -253,7 +253,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {
         logger.info("--> running cluster health");
         clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(numShards.numPrimaries * 4).execute().actionGet();
-        logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+        logger.info("--> done cluster health, status {}", clusterHealth.getStatus());
         assertThat(clusterHealth.isTimedOut(), equalTo(false));
         assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
         assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
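Every hunk in UpdateNumberOfReplicasIT above makes the same mechanical substitution: string concatenation inside a log call becomes a `{}` placeholder. A minimal sketch of the difference, assuming the usual parameterized-logging behavior these APIs share: with concatenation the message string is built eagerly, while the placeholder form defers rendering until the logger knows the line will actually be written.

```java
// Eager: the concatenated message string is built even if INFO is disabled.
logger.info("Done Cluster Health, status " + clusterHealth.getStatus());

// Lazy: format string and argument are handed over as-is; the message is
// only rendered when INFO logging is actually enabled.
logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
```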
diff --git a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java
index b1f94f203e4..26e2b7702c8 100644
--- a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java
+++ b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java
@@ -258,7 +258,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
         assertThat(waitForShardDeletion(node_3, "test", 0), equalTo(false));
 
         Path server2Shard = shardDirectory(node_2, "test", 0);
-        logger.info("--> stopping node " + node_2);
+        logger.info("--> stopping node {}", node_2);
         internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node_2));
 
         logger.info("--> running cluster_health");
@@ -268,7 +268,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
                 .setWaitForRelocatingShards(0)
                 .get();
         assertThat(clusterHealth.isTimedOut(), equalTo(false));
-        logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
+        logger.info("--> done cluster_health, status {}", clusterHealth.getStatus());
 
         assertThat(Files.exists(server2Shard), equalTo(true));
diff --git a/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java b/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java
index cce687fcec3..d14a411c332 100644
--- a/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java
+++ b/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java
@@ -131,7 +131,7 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase {
                 .addField("field1").addField("field2")
                 .execute().actionGet();
         if (searchResponse.getFailedShards() > 0) {
-            logger.warn("failed search " + Arrays.toString(searchResponse.getShardFailures()));
+            logger.warn("failed search {}", Arrays.toString(searchResponse.getShardFailures()));
         }
         assertHitCount(searchResponse, 1);
         assertThat(searchResponse.getHits().getAt(0).field("field1").value().toString(), equalTo("value1"));
diff --git a/core/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java b/core/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java
index 33876ef61ad..c979b2f4013 100644
--- a/core/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java
+++ b/core/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java
@@ -24,7 +24,7 @@ import java.io.InputStream;
 import java.nio.file.Files;
 import java.nio.file.Path;
 
-import org.elasticsearch.common.cli.MockTerminal;
+import org.elasticsearch.cli.MockTerminal;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.settings.SettingsException;
diff --git a/core/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java b/core/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java
index e3777e84f9a..a4632079b35 100644
--- a/core/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java
+++ b/core/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java
@@ -47,11 +47,11 @@ public class SimpleNodesInfoIT extends ESIntegTestCase {
         final String node_2 = nodesIds.get(1);
 
         ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForGreenStatus().setWaitForNodes("2").get();
-        logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
+        logger.info("--> done cluster_health, status {}", clusterHealth.getStatus());
 
         String server1NodeId = internalCluster().getInstance(ClusterService.class, node_1).state().nodes().localNodeId();
         String server2NodeId = internalCluster().getInstance(ClusterService.class, node_2).state().nodes().localNodeId();
-        logger.info("--> started nodes: " + server1NodeId + " and " + server2NodeId);
+        logger.info("--> started nodes: {} and {}", server1NodeId, server2NodeId);
 
         NodesInfoResponse response = client().admin().cluster().prepareNodesInfo().execute().actionGet();
         assertThat(response.getNodes().length, is(2));
@@ -91,11 +91,11 @@ public class SimpleNodesInfoIT extends ESIntegTestCase {
         final String node_2 = nodesIds.get(1);
 
         ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForGreenStatus().setWaitForNodes("2").get();
-        logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
+        logger.info("--> done cluster_health, status {}", clusterHealth.getStatus());
 
         String server1NodeId = internalCluster().getInstance(ClusterService.class, node_1).state().nodes().localNodeId();
         String server2NodeId = internalCluster().getInstance(ClusterService.class, node_2).state().nodes().localNodeId();
-        logger.info("--> started nodes: " + server1NodeId + " and " + server2NodeId);
+        logger.info("--> started nodes: {} and {}", server1NodeId, server2NodeId);
 
         NodesInfoResponse response = client().admin().cluster().prepareNodesInfo().execute().actionGet();
diff --git a/core/src/test/java/org/elasticsearch/percolator/ConcurrentPercolatorIT.java b/core/src/test/java/org/elasticsearch/percolator/ConcurrentPercolatorIT.java
index e99cf51758b..f2493d85e86 100644
--- a/core/src/test/java/org/elasticsearch/percolator/ConcurrentPercolatorIT.java
+++ b/core/src/test/java/org/elasticsearch/percolator/ConcurrentPercolatorIT.java
@@ -292,7 +292,7 @@ public class ConcurrentPercolatorIT extends ESIntegTestCase {
         }
 
         for (Throwable t : exceptionsHolder) {
-            logger.error("Unexpected exception {}", t.getMessage(), t);
+            logger.error("Unexpected exception while indexing", t);
         }
         assertThat(exceptionsHolder.isEmpty(), equalTo(true));
     }
diff --git a/core/src/test/java/org/elasticsearch/percolator/PercolatorIT.java b/core/src/test/java/org/elasticsearch/percolator/PercolatorIT.java
index 01494aab72d..5945a21dc97 100644
--- a/core/src/test/java/org/elasticsearch/percolator/PercolatorIT.java
+++ b/core/src/test/java/org/elasticsearch/percolator/PercolatorIT.java
@@ -1072,7 +1072,7 @@ public class PercolatorIT extends ESIntegTestCase {
         int numLevels = randomIntBetween(1, 25);
         long numQueriesPerLevel = randomIntBetween(10, 250);
         long totalQueries = numLevels * numQueriesPerLevel;
-        logger.info("--> register " + totalQueries + " queries");
+        logger.info("--> register {} queries", totalQueries);
         for (int level = 1; level <= numLevels; level++) {
             for (int query = 1; query <= numQueriesPerLevel; query++) {
                 client().prepareIndex("my-index", PercolatorService.TYPE_NAME, level + "-" + query)
@@ -1166,7 +1166,7 @@ public class PercolatorIT extends ESIntegTestCase {
         Map<Integer, NavigableSet<Integer>> controlMap = new HashMap<>();
         long numQueries = randomIntBetween(100, 250);
-        logger.info("--> register " + numQueries + " queries");
+        logger.info("--> register {} queries", numQueries);
         for (int i = 0; i < numQueries; i++) {
             int value = randomInt(10);
             client().prepareIndex("my-index", PercolatorService.TYPE_NAME, Integer.toString(i))
diff --git a/core/src/test/java/org/elasticsearch/percolator/RecoveryPercolatorIT.java b/core/src/test/java/org/elasticsearch/percolator/RecoveryPercolatorIT.java
index a08eb41236d..f76a117ddb0 100644
--- a/core/src/test/java/org/elasticsearch/percolator/RecoveryPercolatorIT.java
+++ b/core/src/test/java/org/elasticsearch/percolator/RecoveryPercolatorIT.java
@@ -131,7 +131,7 @@ public class RecoveryPercolatorIT extends ESIntegTestCase {
 
         logger.info("Running Cluster Health (wait for the shards to startup)");
         ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
-        logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+        logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
         assertThat(clusterHealth.isTimedOut(), equalTo(false));
         SearchResponse countResponse = client().prepareSearch().setSize(0).setTypes(PercolatorService.TYPE_NAME).setQuery(matchAllQuery()).get();
         assertHitCount(countResponse, 1L);
@@ -140,7 +140,7 @@ public class RecoveryPercolatorIT extends ESIntegTestCase {
         assertThat(actionGet.isAcknowledged(), equalTo(true));
         assertAcked(prepareCreate("test").addMapping("type1", "field1", "type=text").addMapping(PercolatorService.TYPE_NAME, "color", "type=text"));
         clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
-        logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+        logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
         assertThat(clusterHealth.isTimedOut(), equalTo(false));
         assertThat(client().prepareSearch().setSize(0).setTypes(PercolatorService.TYPE_NAME).setQuery(matchAllQuery()).get().getHits().totalHits(), equalTo(0L));
diff --git a/core/src/test/java/org/elasticsearch/percolator/TTLPercolatorIT.java b/core/src/test/java/org/elasticsearch/percolator/TTLPercolatorIT.java
index 52f8ecb4b13..f85b12d85ac 100644
--- a/core/src/test/java/org/elasticsearch/percolator/TTLPercolatorIT.java
+++ b/core/src/test/java/org/elasticsearch/percolator/TTLPercolatorIT.java
@@ -184,7 +184,7 @@ public class TTLPercolatorIT extends ESIntegTestCase {
                         .endObject()
                 ).setTTL(randomIntBetween(1, 500)).setRefresh(true).execute().actionGet();
             } catch (MapperParsingException e) {
-                logger.info("failed indexing {}", i, e);
+                logger.info("failed indexing {}", e, i);
                 // if we are unlucky the TTL is so small that we see the expiry date is already in the past when
                 // we parse the doc ignore those...
                 assertThat(e.getCause(), Matchers.instanceOf(AlreadyExpiredException.class));
diff --git a/core/src/test/java/org/elasticsearch/plugins/PluginCliTests.java b/core/src/test/java/org/elasticsearch/plugins/PluginCliTests.java
deleted file mode 100644
index bd280e4e1d7..00000000000
--- a/core/src/test/java/org/elasticsearch/plugins/PluginCliTests.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.plugins;
-
-import org.elasticsearch.common.cli.CliToolTestCase;
-import org.elasticsearch.common.cli.MockTerminal;
-
-import static org.elasticsearch.common.cli.CliTool.ExitStatus.OK_AND_EXIT;
-import static org.hamcrest.Matchers.containsString;
-import static org.hamcrest.Matchers.hasItem;
-import static org.hamcrest.Matchers.is;
-
-public class PluginCliTests extends CliToolTestCase {
-    public void testHelpWorks() throws Exception {
-        MockTerminal terminal = new MockTerminal();
-        assertThat(new PluginCli(terminal).execute(args("--help")), is(OK_AND_EXIT));
-        assertTerminalOutputContainsHelpFile(terminal, "/org/elasticsearch/plugins/plugin.help");
-
-        terminal.resetOutput();
-        assertThat(new PluginCli(terminal).execute(args("install -h")), is(OK_AND_EXIT));
-        assertTerminalOutputContainsHelpFile(terminal, "/org/elasticsearch/plugins/plugin-install.help");
-        for (String plugin : InstallPluginCommand.OFFICIAL_PLUGINS) {
-            assertThat(terminal.getOutput(), containsString(plugin));
-        }
-
-        terminal.resetOutput();
-        assertThat(new PluginCli(terminal).execute(args("remove --help")), is(OK_AND_EXIT));
-        assertTerminalOutputContainsHelpFile(terminal, "/org/elasticsearch/plugins/plugin-remove.help");
-
-        terminal.resetOutput();
-        assertThat(new PluginCli(terminal).execute(args("list -h")), is(OK_AND_EXIT));
-        assertTerminalOutputContainsHelpFile(terminal, "/org/elasticsearch/plugins/plugin-list.help");
-    }
-}
diff --git a/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java b/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java
index 16f27655055..b441dd32c78 100644
--- a/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java
+++ b/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java
@@ -123,7 +123,7 @@ public class TruncatedRecoveryIT extends ESIntegTestCase {
             public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
                 if (action.equals(RecoveryTargetService.Actions.FILE_CHUNK)) {
                     RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request;
-                    logger.debug("file chunk [" + req.toString() + "] lastChunk: " + req.lastChunk());
+                    logger.debug("file chunk [{}] lastChunk: {}", req, req.lastChunk());
                     if ((req.name().endsWith("cfs") || req.name().endsWith("fdt")) && req.lastChunk() && truncate.get()) {
                         latch.countDown();
                         throw new RuntimeException("Caused some truncated files for fun and profit");
diff --git a/core/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java b/core/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java
new file mode 100644
index 00000000000..848c62ab2b4
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java
@@ -0,0 +1,187 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.cat;
+
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
+import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.RestoreSource;
+import org.elasticsearch.common.Randomness;
+import org.elasticsearch.common.Table;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.indices.recovery.RecoveryState;
+import org.elasticsearch.rest.RestController;
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+
+import static org.elasticsearch.mock.orig.Mockito.when;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.mockito.Mockito.mock;
+
+public class RestRecoveryActionTests extends ESTestCase {
+
+    public void testRestRecoveryAction() {
+        final Settings settings = Settings.EMPTY;
+        final RestController restController = new RestController(settings);
+        final RestRecoveryAction action = new RestRecoveryAction(settings, restController, restController, null);
+        final int totalShards = randomIntBetween(1, 32);
+        final int successfulShards = Math.max(0, totalShards - randomIntBetween(1, 2));
+        final int failedShards = totalShards - successfulShards;
+        final boolean detailed = randomBoolean();
+        final Map<String, List<RecoveryState>> shardRecoveryStates = new HashMap<>();
+        final List<RecoveryState> recoveryStates = new ArrayList<>();
+
+        for (int i = 0; i < successfulShards; i++) {
+            final RecoveryState state = mock(RecoveryState.class);
+            when(state.getShardId()).thenReturn(new ShardId(new Index("index", "_na_"), i));
+            final RecoveryState.Timer timer = mock(RecoveryState.Timer.class);
+            when(timer.time()).thenReturn((long)randomIntBetween(1000000, 10 * 1000000));
+            when(state.getTimer()).thenReturn(timer);
+            when(state.getType()).thenReturn(randomFrom(RecoveryState.Type.values()));
+            when(state.getStage()).thenReturn(randomFrom(RecoveryState.Stage.values()));
+            final DiscoveryNode sourceNode = randomBoolean() ? mock(DiscoveryNode.class) : null;
+            if (sourceNode != null) {
+                when(sourceNode.getHostName()).thenReturn(randomAsciiOfLength(8));
+            }
+            when(state.getSourceNode()).thenReturn(sourceNode);
+            final DiscoveryNode targetNode = mock(DiscoveryNode.class);
+            when(targetNode.getHostName()).thenReturn(randomAsciiOfLength(8));
+            when(state.getTargetNode()).thenReturn(targetNode);
+
+            final RestoreSource restoreSource = randomBoolean() ? mock(RestoreSource.class) : null;
+            if (restoreSource != null) {
+                final SnapshotId snapshotId = mock(SnapshotId.class);
+                when(snapshotId.getRepository()).thenReturn(randomAsciiOfLength(8));
+                when(snapshotId.getSnapshot()).thenReturn(randomAsciiOfLength(8));
+                when(restoreSource.snapshotId()).thenReturn(snapshotId);
+            }
+
+            RecoveryState.Index index = mock(RecoveryState.Index.class);
+
+            final int totalRecoveredFiles = randomIntBetween(1, 64);
+            when(index.totalRecoverFiles()).thenReturn(totalRecoveredFiles);
+            final int recoveredFileCount = randomIntBetween(0, totalRecoveredFiles);
+            when(index.recoveredFileCount()).thenReturn(recoveredFileCount);
+            when(index.recoveredFilesPercent()).thenReturn((100f * recoveredFileCount) / totalRecoveredFiles);
+            when(index.totalFileCount()).thenReturn(randomIntBetween(totalRecoveredFiles, 2 * totalRecoveredFiles));
+
+            final int totalRecoveredBytes = randomIntBetween(1, 1 << 24);
+            when(index.totalRecoverBytes()).thenReturn((long)totalRecoveredBytes);
+            final int recoveredBytes = randomIntBetween(0, totalRecoveredBytes);
+            when(index.recoveredBytes()).thenReturn((long)recoveredBytes);
+            when(index.recoveredBytesPercent()).thenReturn((100f * recoveredBytes) / totalRecoveredBytes);
+            when(index.totalRecoverBytes()).thenReturn((long)randomIntBetween(totalRecoveredBytes, 2 * totalRecoveredBytes));
+            when(state.getIndex()).thenReturn(index);
+
+            final RecoveryState.Translog translog = mock(RecoveryState.Translog.class);
+            final int translogOps = randomIntBetween(0, 1 << 18);
+            when(translog.totalOperations()).thenReturn(translogOps);
+            final int translogOpsRecovered = randomIntBetween(0, translogOps);
+            when(translog.recoveredOperations()).thenReturn(translogOpsRecovered);
+            when(translog.recoveredPercent()).thenReturn(translogOps == 0 ? 100f : (100f * translogOpsRecovered / translogOps));
+            when(state.getTranslog()).thenReturn(translog);
+
+            recoveryStates.add(state);
+        }
+
+        final List<RecoveryState> shuffle = new ArrayList<>(recoveryStates);
+        Randomness.shuffle(shuffle);
+        shardRecoveryStates.put("index", shuffle);
+
+        final List<ShardOperationFailedException> shardFailures = new ArrayList<>();
+        final RecoveryResponse response = new RecoveryResponse(
+                totalShards,
+                successfulShards,
+                failedShards,
+                detailed,
+                shardRecoveryStates,
+                shardFailures);
+        final Table table = action.buildRecoveryTable(null, response);
+
+        assertNotNull(table);
+
+        List<Table.Cell> headers = table.getHeaders();
+        assertThat(headers.get(0).value, equalTo("index"));
+        assertThat(headers.get(1).value, equalTo("shard"));
+        assertThat(headers.get(2).value, equalTo("time"));
+        assertThat(headers.get(3).value, equalTo("type"));
+        assertThat(headers.get(4).value, equalTo("stage"));
+        assertThat(headers.get(5).value, equalTo("source_host"));
+        assertThat(headers.get(6).value, equalTo("target_host"));
+        assertThat(headers.get(7).value, equalTo("repository"));
+        assertThat(headers.get(8).value, equalTo("snapshot"));
+        assertThat(headers.get(9).value, equalTo("files"));
+        assertThat(headers.get(10).value, equalTo("files_recovered"));
+        assertThat(headers.get(11).value, equalTo("files_percent"));
+        assertThat(headers.get(12).value, equalTo("files_total"));
+        assertThat(headers.get(13).value, equalTo("bytes"));
+        assertThat(headers.get(14).value, equalTo("bytes_recovered"));
+        assertThat(headers.get(15).value, equalTo("bytes_percent"));
+        assertThat(headers.get(16).value, equalTo("bytes_total"));
+        assertThat(headers.get(17).value, equalTo("translog_ops"));
+        assertThat(headers.get(18).value, equalTo("translog_ops_recovered"));
+        assertThat(headers.get(19).value, equalTo("translog_ops_percent"));
+
+        assertThat(table.getRows().size(), equalTo(successfulShards));
+        for (int i = 0; i < successfulShards; i++) {
+            final RecoveryState state = recoveryStates.get(i);
+            List<Table.Cell> cells = table.getRows().get(i);
+            assertThat(cells.get(0).value, equalTo("index"));
+            assertThat(cells.get(1).value, equalTo(i));
+            assertThat(cells.get(2).value, equalTo(new TimeValue(state.getTimer().time())));
+            assertThat(cells.get(3).value, equalTo(state.getType().name().toLowerCase(Locale.ROOT)));
+            assertThat(cells.get(4).value, equalTo(state.getStage().name().toLowerCase(Locale.ROOT)));
+            assertThat(cells.get(5).value, equalTo(state.getSourceNode() == null ? "n/a" : state.getSourceNode().getHostName()));
+            assertThat(cells.get(6).value, equalTo(state.getTargetNode().getHostName()));
+            assertThat(
+                cells.get(7).value,
+                equalTo(state.getRestoreSource() == null ? "n/a" : state.getRestoreSource().snapshotId().getRepository()));
+            assertThat(
+                cells.get(8).value,
+                equalTo(state.getRestoreSource() == null ? "n/a" : state.getRestoreSource().snapshotId().getSnapshot()));
+            assertThat(cells.get(9).value, equalTo(state.getIndex().totalRecoverFiles()));
+            assertThat(cells.get(10).value, equalTo(state.getIndex().recoveredFileCount()));
+            assertThat(cells.get(11).value, equalTo(percent(state.getIndex().recoveredFilesPercent())));
+            assertThat(cells.get(12).value, equalTo(state.getIndex().totalFileCount()));
+            assertThat(cells.get(13).value, equalTo(state.getIndex().totalRecoverBytes()));
+            assertThat(cells.get(14).value, equalTo(state.getIndex().recoveredBytes()));
+            assertThat(cells.get(15).value, equalTo(percent(state.getIndex().recoveredBytesPercent())));
+            assertThat(cells.get(16).value, equalTo(state.getIndex().totalBytes()));
+            assertThat(cells.get(17).value, equalTo(state.getTranslog().totalOperations()));
+            assertThat(cells.get(18).value, equalTo(state.getTranslog().recoveredOperations()));
+            assertThat(cells.get(19).value, equalTo(percent(state.getTranslog().recoveredPercent())));
+        }
+    }
+
+    private static String percent(float percent) {
+        return String.format(Locale.ROOT, "%1.1f%%", percent);
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ChildrenIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ChildrenIT.java
index 74c308c0fcd..4e4b54d91f6 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ChildrenIT.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ChildrenIT.java
@@ -185,7 +185,7 @@ children("to_comment", "comment")
         assertThat(categoryTerms.getBuckets().size(), equalTo(3));
 
         for (Terms.Bucket bucket : categoryTerms.getBuckets()) {
-            logger.info("bucket=" + bucket.getKey());
+            logger.info("bucket={}", bucket.getKey());
             Children childrenBucket = bucket.getAggregations().get("to_comment");
             TopHits topHits = childrenBucket.getAggregations().get("top_comments");
             logger.info("total_hits={}", topHits.getHits().getTotalHits());
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java
index 044ca4f8045..11d838d43c4 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java
@@ -84,7 +84,7 @@ public class NestedIT extends ESIntegTestCase {
         numParents = randomIntBetween(3, 10);
         numChildren = new int[numParents];
         aggCollectionMode = randomFrom(SubAggCollectionMode.values());
-        logger.info("AGG COLLECTION MODE: " + aggCollectionMode);
+        logger.info("AGG COLLECTION MODE: {}", aggCollectionMode);
         int totalChildren = 0;
         for (int i = 0; i < numParents; ++i) {
             if (i == numParents - 1 && totalChildren == 0) {
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java
index 8304922aa62..14f2912d19f 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java
@@ -433,7 +433,7 @@ public class TopHitsIT extends ESIntegTestCase {
             assertThat(hits.totalHits(), equalTo(controlHits.totalHits()));
             assertThat(hits.getHits().length, equalTo(controlHits.getHits().length));
             for (int i = 0; i < hits.getHits().length; i++) {
-                logger.info(i + ": top_hits: [" + hits.getAt(i).id() + "][" + hits.getAt(i).sortValues()[0] + "] control: [" + controlHits.getAt(i).id() + "][" + controlHits.getAt(i).sortValues()[0] + "]");
+                logger.info("{}: top_hits: [{}][{}] control: [{}][{}]", i, hits.getAt(i).id(), hits.getAt(i).sortValues()[0], controlHits.getAt(i).id(), controlHits.getAt(i).sortValues()[0]);
                 assertThat(hits.getAt(i).id(), equalTo(controlHits.getAt(i).id()));
                 assertThat(hits.getAt(i).sortValues()[0], equalTo(controlHits.getAt(i).sortValues()[0]));
             }
@@ -609,7 +609,7 @@ public class TopHitsIT extends ESIntegTestCase {
     public void testTrackScores() throws Exception {
         boolean[] trackScores = new boolean[]{true, false};
         for (boolean trackScore : trackScores) {
-            logger.info("Track score=" + trackScore);
+            logger.info("Track score={}", trackScore);
             SearchResponse response = client().prepareSearch("idx").setTypes("field-collapsing")
                     .setQuery(matchQuery("text", "term rare"))
                     .addAggregation(terms("terms")
diff --git a/core/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java b/core/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java
index beedd72c280..3168fdc0ff1 100644
--- a/core/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java
+++ b/core/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java
@@ -142,7 +142,7 @@ public class SearchWhileRelocatingIT extends ESIntegTestCase {
             }
             assertThat("numberOfReplicas: " + numberOfReplicas + " failed in iteration " + i + ", verification: " + verified, thrownExceptions, Matchers.emptyIterable());
             // if we hit only non-critical exceptions we only make sure that the post search works
-            logger.info("Non-CriticalExceptions: " + nonCriticalExceptions.toString());
+            logger.info("Non-CriticalExceptions: {}", nonCriticalExceptions);
             assertThat("numberOfReplicas: " + numberOfReplicas + " failed in iteration " + i + ", verification: " + verified, postSearchOK, is(true));
         }
     }
diff --git a/core/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java b/core/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java
index d342402e4bf..489a604292d 100644
--- a/core/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java
+++ b/core/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java
@@ -29,6 +29,7 @@ import org.elasticsearch.action.index.IndexResponse;
 import org.elasticsearch.action.search.SearchPhaseExecutionException;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.settings.Settings.Builder;
 import org.elasticsearch.common.settings.SettingsModule;
@@ -152,8 +153,10 @@ public class SearchWithRandomExceptionsIT extends ESIntegTestCase {
     public static class RandomExceptionDirectoryReaderWrapper extends MockEngineSupport.DirectoryReaderWrapper {
 
         public static class TestPlugin extends Plugin {
-            public static final Setting<Double> EXCEPTION_TOP_LEVEL_RATIO_SETTING = Setting.doubleSetting(EXCEPTION_TOP_LEVEL_RATIO_KEY, 0.1d, 0.0d, false, Setting.Scope.INDEX);
-            public static final Setting<Double> EXCEPTION_LOW_LEVEL_RATIO_SETTING = Setting.doubleSetting(EXCEPTION_LOW_LEVEL_RATIO_KEY, 0.1d, 0.0d, false, Setting.Scope.INDEX);
+            public static final Setting<Double> EXCEPTION_TOP_LEVEL_RATIO_SETTING =
+                Setting.doubleSetting(EXCEPTION_TOP_LEVEL_RATIO_KEY, 0.1d, 0.0d, Property.IndexScope);
+            public static final Setting<Double> EXCEPTION_LOW_LEVEL_RATIO_SETTING =
+                Setting.doubleSetting(EXCEPTION_LOW_LEVEL_RATIO_KEY, 0.1d, 0.0d, Property.IndexScope);
             @Override
             public String name() {
                 return "random-exception-reader-wrapper";
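The SearchWithRandomExceptionsIT hunk above doubles as a record of a settings-API change: the boolean dynamic flag and `Setting.Scope` enum are replaced by vararg `Setting.Property` values. A condensed sketch of the new declaration style, with the registration call borrowed from the MockRepository change later in this patch:

```java
// Scope is now expressed as a Property flag on the setting itself.
public static final Setting<Double> EXCEPTION_TOP_LEVEL_RATIO_SETTING =
        Setting.doubleSetting(EXCEPTION_TOP_LEVEL_RATIO_KEY, 0.1d, 0.0d, Property.IndexScope);

// Plugins make the setting known to the node, as MockRepository does below.
public void onModule(SettingsModule module) {
    module.registerSetting(EXCEPTION_TOP_LEVEL_RATIO_SETTING);
}
```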
diff --git a/core/src/test/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java b/core/src/test/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java
index 3cd1d269275..bd1d6ed9795 100644
--- a/core/src/test/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java
+++ b/core/src/test/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java
@@ -90,7 +90,7 @@ public class TransportSearchFailuresIT extends ESIntegTestCase {
                 .cluster()
                 .health(clusterHealthRequest("test").waitForYellowStatus().waitForRelocatingShards(0)
                         .waitForActiveShards(test.totalNumShards)).actionGet();
-        logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+        logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
         assertThat(clusterHealth.isTimedOut(), equalTo(false));
         assertThat(clusterHealth.getStatus(), anyOf(equalTo(ClusterHealthStatus.YELLOW), equalTo(ClusterHealthStatus.GREEN)));
         assertThat(clusterHealth.getActiveShards(), equalTo(test.totalNumShards));
diff --git a/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java b/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java
index 9440a3e91c1..2d178488dd9 100644
--- a/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java
+++ b/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java
@@ -51,7 +51,6 @@ import org.hamcrest.Matchers;
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -576,20 +575,20 @@ public class ChildQuerySearchIT extends ESIntegTestCase {
                 .setQuery(hasChildQuery("child", termQuery("c_field", "1")).scoreMode(ScoreMode.Max))
                 .get();
         assertHitCount(searchResponse, 1L);
-        assertThat(searchResponse.getHits().getAt(0).explanation().getDescription(), equalTo("Score based on join value p1"));
+        assertThat(searchResponse.getHits().getAt(0).explanation().getDescription(), containsString("join value p1"));
 
         searchResponse = client().prepareSearch("test")
                 .setExplain(true)
                 .setQuery(hasParentQuery("parent", termQuery("p_field", "1")).score(true))
                 .get();
         assertHitCount(searchResponse, 1L);
-        assertThat(searchResponse.getHits().getAt(0).explanation().getDescription(), equalTo("Score based on join value p1"));
+        assertThat(searchResponse.getHits().getAt(0).explanation().getDescription(), containsString("join value p1"));
 
         ExplainResponse explainResponse = client().prepareExplain("test", "parent", parentId)
                 .setQuery(hasChildQuery("child", termQuery("c_field", "1")).scoreMode(ScoreMode.Max))
                 .get();
         assertThat(explainResponse.isExists(), equalTo(true));
-        assertThat(explainResponse.getExplanation().getDetails()[0].getDescription(), equalTo("Score based on join value p1"));
+        assertThat(explainResponse.getExplanation().getDetails()[0].getDescription(), containsString("join value p1"));
     }
 
     List createDocBuilders() {
diff --git a/core/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java b/core/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java
index e41e3c178c5..d124fcf6386 100644
--- a/core/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java
+++ b/core/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java
@@ -299,7 +299,7 @@ public class GeoShapeQueryTests extends ESSingleNodeTestCase {
         // Create a random geometry collection.
         GeometryCollectionBuilder gcb = RandomShapeGenerator.createGeometryCollection(getRandom());
-        logger.info("Created Random GeometryCollection containing " + gcb.numShapes() + " shapes");
+        logger.info("Created Random GeometryCollection containing {} shapes", gcb.numShapes());
 
         client().admin().indices().prepareCreate("test").addMapping("type", "location", "type=geo_shape,tree=quadtree")
                 .execute().actionGet();
diff --git a/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java b/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java
index f34d5b33c9d..dbe2714d05d 100644
--- a/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java
+++ b/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java
@@ -2044,7 +2044,7 @@ public class HighlighterSearchIT extends ESIntegTestCase {
                 .query(multiMatchQueryBuilder)
                 .highlighter(highlight().highlightQuery(randomBoolean() ? multiMatchQueryBuilder : null).highlighterType(highlighterType)
                         .field(new Field("field1").requireFieldMatch(true).preTags("").postTags("")));
-        logger.info("Running multi-match type: [" + matchQueryType + "] highlight with type: [" + highlighterType + "]");
+        logger.info("Running multi-match type: [{}] highlight with type: [{}]", matchQueryType, highlighterType);
         SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet();
         assertHitCount(searchResponse, 1L);
         assertHighlight(searchResponse, 0, "field1", 0, anyOf(equalTo("The quick brown fox jumps over"),
diff --git a/core/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java b/core/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java
index 8f0ef3c0fbe..651982106c5 100644
--- a/core/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java
+++ b/core/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java
@@ -383,7 +383,7 @@ public class MoreLikeThisIT extends ESIntegTestCase {
         int maxIters = randomIntBetween(10, 20);
         for (int i = 0; i < maxIters; i++) {
             int max_query_terms = randomIntBetween(1, values.length);
-            logger.info("Running More Like This with max_query_terms = %s", max_query_terms);
+            logger.info("Running More Like This with max_query_terms = {}", max_query_terms);
             MoreLikeThisQueryBuilder mltQuery = moreLikeThisQuery(new String[] {"text"}, null, new Item[] {new Item(null, null, "0")})
                     .minTermFreq(1).minDocFreq(1)
                     .maxQueryTerms(max_query_terms).minimumShouldMatch("0%");
@@ -419,7 +419,7 @@ public class MoreLikeThisIT extends ESIntegTestCase {
                 .minTermFreq(1)
                 .minDocFreq(1)
                 .minimumShouldMatch(minimumShouldMatch);
-        logger.info("Testing with minimum_should_match = " + minimumShouldMatch);
+        logger.info("Testing with minimum_should_match = {}", minimumShouldMatch);
         SearchResponse response = client().prepareSearch("test").setTypes("type1")
                 .setQuery(mltQuery).get();
         assertSearchResponse(response);
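The MoreLikeThisIT fix above is the most substantive one in this batch: `%s` is a printf-style conversion that a `{}`-based formatter does not interpret, so the original statement almost certainly printed the literal `%s` and silently dropped `max_query_terms`. This is exactly the kind of format-string/argument mismatch a static logger-usage check exists to catch:

```java
// Broken: "%s" is not a placeholder for this formatter; the argument is unused.
logger.info("Running More Like This with max_query_terms = %s", max_query_terms);

// Fixed: "{}" is the placeholder this API substitutes.
logger.info("Running More Like This with max_query_terms = {}", max_query_terms);
```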
diff --git a/core/src/test/java/org/elasticsearch/search/profile/QueryProfilerIT.java b/core/src/test/java/org/elasticsearch/search/profile/QueryProfilerIT.java
index f09b18bdb8a..f5507504586 100644
--- a/core/src/test/java/org/elasticsearch/search/profile/QueryProfilerIT.java
+++ b/core/src/test/java/org/elasticsearch/search/profile/QueryProfilerIT.java
@@ -72,7 +72,7 @@ public class QueryProfilerIT extends ESIntegTestCase {
         int iters = between(20, 100);
         for (int i = 0; i < iters; i++) {
             QueryBuilder q = randomQueryBuilder(stringFields, numericFields, numDocs, 3);
-            logger.info(q.toString());
+            logger.info("Query: {}", q);
 
             SearchResponse resp = client().prepareSearch()
                     .setQuery(q)
@@ -126,8 +126,7 @@ public class QueryProfilerIT extends ESIntegTestCase {
         int iters = between(1, 10);
         for (int i = 0; i < iters; i++) {
             QueryBuilder q = randomQueryBuilder(stringFields, numericFields, numDocs, 3);
-            logger.info(q.toString());
-
+            logger.info("Query: {}", q);
             SearchRequestBuilder vanilla = client().prepareSearch("test")
                     .setQuery(q)
@@ -309,7 +308,7 @@ public class QueryProfilerIT extends ESIntegTestCase {
         refresh();
 
         QueryBuilder q = QueryBuilders.boolQuery();
-        logger.info(q.toString());
+        logger.info("Query: {}", q);
 
         SearchResponse resp = client().prepareSearch()
                 .setQuery(q)
@@ -360,8 +359,7 @@ public class QueryProfilerIT extends ESIntegTestCase {
         QueryBuilder q = QueryBuilders.boolQuery().must(QueryBuilders.boolQuery().must(QueryBuilders.boolQuery().must(QueryBuilders.matchQuery("field1", "one"))));
-
-        logger.info(q.toString());
+        logger.info("Query: {}", q);
 
         SearchResponse resp = client().prepareSearch()
                 .setQuery(q)
@@ -408,7 +406,7 @@ public class QueryProfilerIT extends ESIntegTestCase {
         QueryBuilder q = QueryBuilders.boostingQuery(QueryBuilders.matchQuery("field1", "one"), QueryBuilders.matchQuery("field1", "two"))
                 .boost(randomFloat())
                 .negativeBoost(randomFloat());
-        logger.info(q.toString());
+        logger.info("Query: {}", q);
 
         SearchResponse resp = client().prepareSearch()
                 .setQuery(q)
@@ -455,7 +453,7 @@ public class QueryProfilerIT extends ESIntegTestCase {
         QueryBuilder q = QueryBuilders.disMaxQuery()
                 .boost(0.33703882f)
                 .add(QueryBuilders.rangeQuery("field2").from(null).to(73).includeLower(true).includeUpper(true));
-        logger.info(q.toString());
+        logger.info("Query: {}", q);
 
         SearchResponse resp = client().prepareSearch()
                 .setQuery(q)
@@ -501,7 +499,7 @@ public class QueryProfilerIT extends ESIntegTestCase {
 
         QueryBuilder q = QueryBuilders.rangeQuery("field2").from(0).to(5);
 
-        logger.info(q.toString());
+        logger.info("Query: {}", q);
 
         SearchResponse resp = client().prepareSearch()
                 .setQuery(q)
@@ -547,7 +545,7 @@ public class QueryProfilerIT extends ESIntegTestCase {
 
         QueryBuilder q = QueryBuilders.matchPhraseQuery("field1", "one two");
 
-        logger.info(q.toString());
+        logger.info("Query: {}", q);
 
         SearchResponse resp = client().prepareSearch()
                 .setQuery(q)
@@ -559,7 +557,7 @@ public class QueryProfilerIT extends ESIntegTestCase {
 
         if (resp.getShardFailures().length > 0) {
             for (ShardSearchFailure f : resp.getShardFailures()) {
-                logger.error(f.toString());
+                logger.error("Shard search failure: {}", f);
             }
             fail();
         }
@@ -603,7 +601,7 @@ public class QueryProfilerIT extends ESIntegTestCase {
         refresh();
 
         QueryBuilder q = QueryBuilders.rangeQuery("field2").from(0).to(5);
-        logger.info(q.toString());
+        logger.info("Query: {}", q);
 
         SearchResponse resp = client().prepareSearch().setQuery(q).setProfile(false).execute().actionGet();
         assertThat("Profile response element should be an empty map", resp.getProfileResults().size(), equalTo(0));
diff --git a/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java b/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java
index 23e2592447b..be190b547ea 100644
--- a/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java
+++ b/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java
@@ -156,12 +156,12 @@ public class MultiMatchQueryIT extends ESIntegTestCase {
                 .endObject()
                 .startObject("first_name")
                     .field("type", "text")
-                    .field("omit_norms", "true")
+                    .field("norms", false)
                     .field("copy_to", "first_name_phrase")
                 .endObject()
                 .startObject("last_name")
                     .field("type", "text")
-                    .field("omit_norms", "true")
+                    .field("norms", false)
                     .field("copy_to", "last_name_phrase")
                 .endObject()
                 .endObject()
diff --git a/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java b/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java
index 44b8636d51a..68b496cd566 100644
--- a/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java
+++ b/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java
@@ -117,7 +117,7 @@ public class SearchQueryIT extends ESIntegTestCase {
     public void testOmitNormsOnAll() throws ExecutionException, InterruptedException, IOException {
         assertAcked(prepareCreate("test")
                 .addMapping("type1", jsonBuilder().startObject().startObject("type1")
-                        .startObject("_all").field("omit_norms", true).endObject()
+                        .startObject("_all").field("norms", false).endObject()
                         .endObject().endObject())
                 .setSettings(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)); // only one shard otherwise IDF might be different for comparing scores
diff --git a/core/src/test/java/org/elasticsearch/search/scroll/DuelScrollIT.java b/core/src/test/java/org/elasticsearch/search/scroll/DuelScrollIT.java
index 292f9a495dc..6c0b9963940 100644
--- a/core/src/test/java/org/elasticsearch/search/scroll/DuelScrollIT.java
+++ b/core/src/test/java/org/elasticsearch/search/scroll/DuelScrollIT.java
@@ -282,8 +282,8 @@ public class DuelScrollIT extends ESIntegTestCase {
             }
             assertEquals(control.getHits().getTotalHits(), scrollDocs);
         } catch (AssertionError e) {
-            logger.info("Control:\n" + control);
-            logger.info("Scroll size=" + size + ", from=" + scrollDocs + ":\n" + scroll);
+            logger.info("Control:\n{}", control);
+            logger.info("Scroll size={}, from={}:\n{}", size, scrollDocs, scroll);
             throw e;
         } finally {
             clearScroll(scroll.getScrollId());
diff --git a/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java b/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java
index dc06c43cb85..309c4bcdaf2 100644
--- a/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java
+++ b/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java
@@ -78,8 +78,8 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase {
         GeoPoint[] d2Points = {new GeoPoint(5, 1), new GeoPoint(6, 2)};
         createShuffeldJSONArray(d2Builder, d2Points);
 
-        logger.info(d1Builder.string());
-        logger.info(d2Builder.string());
+        logger.info("d1: {}", d1Builder);
+        logger.info("d2: {}", d2Builder);
         indexRandom(true, client().prepareIndex("index", "type", "d1").setSource(d1Builder), client().prepareIndex("index", "type", "d2").setSource(d2Builder));
diff --git a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
index bd6c2533652..530b1fb25e9 100644
--- a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
+++ b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
@@ -325,7 +325,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         logger.info("--> execution was blocked on node [{}], shutting it down", blockedNode);
         unblockNode(blockedNode);
 
-        logger.info("--> stopping node", blockedNode);
+        logger.info("--> stopping node [{}]", blockedNode);
         stopNode(blockedNode);
         logger.info("--> waiting for completion");
         SnapshotInfo snapshotInfo = waitForCompletion("test-repo", "test-snap", TimeValue.timeValueSeconds(60));
@@ -379,7 +379,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         // Make sure that abort makes some progress
        Thread.sleep(100);
         unblockNode(blockedNode);
-        logger.info("--> stopping node", blockedNode);
+        logger.info("--> stopping node [{}]", blockedNode);
         stopNode(blockedNode);
         try {
             DeleteSnapshotResponse deleteSnapshotResponse = deleteSnapshotResponseFuture.actionGet();
@@ -632,8 +632,8 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         client().admin().cluster().preparePutRepository("test-repo")
                 .setType("mock").setSettings(Settings.settingsBuilder()
                         .put("location", randomRepoPath())
-                        .put("secret.mock.username", "notsecretusername")
-                        .put("secret.mock.password", "verysecretpassword")
+                        .put(MockRepository.Plugin.USERNAME_SETTING.getKey(), "notsecretusername")
+                        .put(MockRepository.Plugin.PASSWORD_SETTING.getKey(), "verysecretpassword")
                 ).get();
 
         RestGetRepositoriesAction getRepoAction = internalCluster().getInstance(RestGetRepositoriesAction.class);
diff --git a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java
index a8a45e6a42f..5dc6d59692b 100644
--- a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java
+++ b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java
@@ -761,7 +761,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
         Client client = client();
 
         Path repo = randomRepoPath();
-        logger.info("--> creating repository at " + repo.toAbsolutePath());
+        logger.info("--> creating repository at {}", repo.toAbsolutePath());
         assertAcked(client.admin().cluster().preparePutRepository("test-repo")
                 .setType("fs").setSettings(Settings.settingsBuilder()
                         .put("location", repo)
@@ -817,7 +817,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
         Client client = client();
 
         Path repo = randomRepoPath();
-        logger.info("--> creating repository at " + repo.toAbsolutePath());
+        logger.info("--> creating repository at {}", repo.toAbsolutePath());
         assertAcked(client.admin().cluster().preparePutRepository("test-repo")
                 .setType("fs").setSettings(Settings.settingsBuilder()
                         .put("location", repo)
@@ -855,7 +855,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
         Client client = client();
 
         Path repo = randomRepoPath();
-        logger.info("--> creating repository at " + repo.toAbsolutePath());
+        logger.info("--> creating repository at {}", repo.toAbsolutePath());
         assertAcked(client.admin().cluster().preparePutRepository("test-repo")
                 .setType("fs").setSettings(Settings.settingsBuilder()
                         .put("location", repo)
@@ -889,7 +889,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
         Client client = client();
 
         Path repo = randomRepoPath();
-        logger.info("--> creating repository at " + repo.toAbsolutePath());
+        logger.info("--> creating repository at {}", repo.toAbsolutePath());
         assertAcked(client.admin().cluster().preparePutRepository("test-repo")
                 .setType("fs").setSettings(Settings.settingsBuilder()
                         .put("location", repo)
@@ -1448,7 +1448,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
             }
         }
 
-        logger.info("--> checking snapshot status for all currently running and snapshot with empty repository", blockedNode);
+        logger.info("--> checking snapshot status for all currently running and snapshot with empty repository");
         response = client.admin().cluster().prepareSnapshotStatus().execute().actionGet();
         assertThat(response.getSnapshots().size(), equalTo(1));
         snapshotStatus = response.getSnapshots().get(0);
@@ -1461,7 +1461,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
             }
         }
 
-        logger.info("--> checking that _current returns the currently running snapshot", blockedNode);
+        logger.info("--> checking that _current returns the currently running snapshot");
         GetSnapshotsResponse getResponse = client.admin().cluster().prepareGetSnapshots("test-repo").setCurrentSnapshot().execute().actionGet();
         assertThat(getResponse.getSnapshots().size(), equalTo(1));
         SnapshotInfo snapshotInfo = getResponse.getSnapshots().get(0);
@@ -1475,7 +1475,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
         logger.info("--> done");
 
-        logger.info("--> checking snapshot status again after snapshot is done", blockedNode);
+        logger.info("--> checking snapshot status again after snapshot is done");
         response = client.admin().cluster().prepareSnapshotStatus("test-repo").addSnapshots("test-snap").execute().actionGet();
         snapshotStatus = response.getSnapshots().get(0);
         assertThat(snapshotStatus.getIndices().size(), equalTo(1));
@@ -1486,11 +1486,11 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
         assertThat(indexStatus.getShardsStats().getDoneShards(), equalTo(snapshotInfo.successfulShards()));
         assertThat(indexStatus.getShards().size(), equalTo(snapshotInfo.totalShards()));
 
-        logger.info("--> checking snapshot status after it is done with empty repository", blockedNode);
+        logger.info("--> checking snapshot status after it is done with empty repository");
         response = client.admin().cluster().prepareSnapshotStatus().execute().actionGet();
         assertThat(response.getSnapshots().size(), equalTo(0));
 
-        logger.info("--> checking that _current no longer returns the snapshot", blockedNode);
+        logger.info("--> checking that _current no longer returns the snapshot");
         assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("_current").execute().actionGet().getSnapshots().isEmpty(), equalTo(true));
 
         try {
@@ -1862,41 +1862,44 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
         } else {
             waitForBlockOnAnyDataNode("test-repo", TimeValue.timeValueMinutes(1));
         }
-        if (allowPartial) {
-            // partial snapshots allow close / delete operations
-            if (randomBoolean()) {
-                logger.info("--> delete index while partial snapshot is running");
-                client.admin().indices().prepareDelete("test-idx-1").get();
-            } else {
-                logger.info("--> close index while partial snapshot is running");
-                client.admin().indices().prepareClose("test-idx-1").get();
-            }
-        } else {
-            // non-partial snapshots do not allow close / delete operations on indices where snapshot has not been completed
-            if (randomBoolean()) {
-                try {
-                    logger.info("--> delete index while non-partial snapshot is running");
+        try {
+            if (allowPartial) {
+                // partial snapshots allow close / delete operations
+                if (randomBoolean()) {
+                    logger.info("--> delete index while partial snapshot is running");
                     client.admin().indices().prepareDelete("test-idx-1").get();
-                    fail("Expected deleting index to fail during snapshot");
-                } catch (IllegalArgumentException e) {
-                    assertThat(e.getMessage(), containsString("Cannot delete indices that are being snapshotted: [test-idx-1]"));
+                } else {
+                    logger.info("--> close index while partial snapshot is running");
+                    client.admin().indices().prepareClose("test-idx-1").get();
                 }
             } else {
-                try {
-                    logger.info("--> close index while non-partial snapshot is running");
-                    client.admin().indices().prepareClose("test-idx-1").get();
-                    fail("Expected closing index to fail during snapshot");
-                } catch (IllegalArgumentException e) {
-                    assertThat(e.getMessage(), containsString("Cannot close indices that are being snapshotted: [test-idx-1]"));
+                // non-partial snapshots do not allow close / delete operations on indices where snapshot has not been completed
+                if (randomBoolean()) {
+                    try {
+                        logger.info("--> delete index while non-partial snapshot is running");
+                        client.admin().indices().prepareDelete("test-idx-1").get();
+                        fail("Expected deleting index to fail during snapshot");
+                    } catch (IllegalArgumentException e) {
+                        assertThat(e.getMessage(), containsString("Cannot delete indices that are being snapshotted: [test-idx-1]"));
+                    }
+                } else {
+                    try {
+                        logger.info("--> close index while non-partial snapshot is running");
+                        client.admin().indices().prepareClose("test-idx-1").get();
+                        fail("Expected closing index to fail during snapshot");
+                    } catch (IllegalArgumentException e) {
+                        assertThat(e.getMessage(), containsString("Cannot close indices that are being snapshotted: [test-idx-1]"));
+                    }
                 }
             }
-        }
-        if (initBlocking) {
-            logger.info("--> unblock running master node");
-            unblockNode(internalCluster().getMasterName());
-        } else {
-            logger.info("--> unblock all data nodes");
-            unblockAllDataNodes("test-repo");
+        } finally {
+            if (initBlocking) {
+                logger.info("--> unblock running master node");
+                unblockNode(internalCluster().getMasterName());
+            } else {
+                logger.info("--> unblock all data nodes");
+                unblockAllDataNodes("test-repo");
+            }
         }
         logger.info("--> waiting for snapshot to finish");
         CreateSnapshotResponse createSnapshotResponse = future.get();
@@ -1946,24 +1949,27 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
         blockAllDataNodes("test-repo");
         logger.info("--> execution will be blocked on all data nodes");
 
-        logger.info("--> start restore");
-        ListenableActionFuture<RestoreSnapshotResponse> restoreFut = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap")
-                .setWaitForCompletion(true)
-                .execute();
-
-        logger.info("--> waiting for block to kick in");
-        waitForBlockOnAnyDataNode("test-repo", TimeValue.timeValueSeconds(60));
-
-        logger.info("--> close index while restore is running");
+        final ListenableActionFuture<RestoreSnapshotResponse> restoreFut;
         try {
-            client.admin().indices().prepareClose("test-idx-1").get();
-            fail("Expected closing index to fail during restore");
-        } catch (IllegalArgumentException e) {
-            assertThat(e.getMessage(), containsString("Cannot close indices that are being restored: [test-idx-1]"));
-        }
+            logger.info("--> start restore");
+            restoreFut = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap")
+                    .setWaitForCompletion(true)
+                    .execute();
 
-        logger.info("--> unblocking all data nodes");
-        unblockAllDataNodes("test-repo");
+            logger.info("--> waiting for block to kick in");
+            waitForBlockOnAnyDataNode("test-repo", TimeValue.timeValueMinutes(1));
+
+            logger.info("--> close index while restore is running");
+            try {
+                client.admin().indices().prepareClose("test-idx-1").get();
+                fail("Expected closing index to fail during restore");
+            } catch (IllegalArgumentException e) {
+                assertThat(e.getMessage(), containsString("Cannot close indices that are being restored: [test-idx-1]"));
+            }
+        } finally {
+            logger.info("--> unblocking all data nodes");
+            unblockAllDataNodes("test-repo");
+        }
 
         logger.info("--> wait for restore to finish");
         RestoreSnapshotResponse restoreSnapshotResponse = restoreFut.get();
@@ -2159,7 +2165,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
     public void testListCorruptedSnapshot() throws Exception {
         Client client = client();
         Path repo = randomRepoPath();
-        logger.info("--> creating repository at " + repo.toAbsolutePath());
+        logger.info("--> creating repository at {}", repo.toAbsolutePath());
         assertAcked(client.admin().cluster().preparePutRepository("test-repo")
                 .setType("fs").setSettings(Settings.settingsBuilder()
                         .put("location", repo)
CreateSnapshotResponse createSnapshotResponseSecond = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-1").setWaitForCompletion(true).setIndices("test").get(); assertThat(createSnapshotResponseSecond.getSnapshotInfo().successfulShards(), greaterThan(0)); assertThat(createSnapshotResponseSecond.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponseSecond.getSnapshotInfo().totalShards())); diff --git a/core/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java b/core/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java index e92a28db86b..5743709e26a 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java +++ b/core/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java @@ -28,12 +28,11 @@ import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.env.Environment; import org.elasticsearch.index.snapshots.IndexShardRepository; @@ -49,8 +48,6 @@ import java.io.UnsupportedEncodingException; import java.nio.file.Path; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; -import java.util.Collection; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; @@ -63,6 +60,10 @@ public class MockRepository extends FsRepository { public static class Plugin extends org.elasticsearch.plugins.Plugin { + public static final Setting<String> USERNAME_SETTING = Setting.simpleString("secret.mock.username", Property.NodeScope); + public static final Setting<String> PASSWORD_SETTING = + Setting.simpleString("secret.mock.password", Property.NodeScope, Property.Filtered); + @Override public String name() { return "mock-repository"; @@ -78,8 +79,8 @@ public class MockRepository extends FsRepository { } public void onModule(SettingsModule module) { - module.registerSettingsFilter("secret.mock.password"); - + module.registerSetting(USERNAME_SETTING); + module.registerSetting(PASSWORD_SETTING); } } @@ -120,7 +121,7 @@ public class MockRepository extends FsRepository { blockOnInitialization = repositorySettings.settings().getAsBoolean("block_on_init", false); randomPrefix = repositorySettings.settings().get("random", "default"); waitAfterUnblock = repositorySettings.settings().getAsLong("wait_after_unblock", 0L); - logger.info("starting mock repository with random prefix " + randomPrefix); + logger.info("starting mock repository with random prefix {}", randomPrefix); mockBlobStore = new MockBlobStore(super.blobStore()); } @@ -176,14 +177,12 @@ public class MockRepository extends FsRepository { } public synchronized void unblockExecution() { - if (blocked) { - blocked = false; - // Clean blocking flags, so we wouldn't try to block again - blockOnDataFiles = false; - blockOnControlFiles = false; - blockOnInitialization = false; - this.notifyAll(); - } + blocked = false; + // Clean
blocking flags, so we wouldn't try to block again + blockOnDataFiles = false; + blockOnControlFiles = false; + blockOnInitialization = false; + this.notifyAll(); } public boolean blocked() { diff --git a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportIT.java b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportIT.java index ef408d16784..d9466d28424 100644 --- a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportIT.java +++ b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportIT.java @@ -157,7 +157,7 @@ public class NettyTransportIT extends ESIntegTestCase { try { transportChannel.sendResponse(e); } catch (IOException e1) { - logger.warn("Failed to send error message back to client for action [" + action + "]", e); + logger.warn("Failed to send error message back to client for action [{}]", e, action); logger.warn("Actual Exception", e1); } } @@ -194,7 +194,7 @@ public class NettyTransportIT extends ESIntegTestCase { try { transportChannel.sendResponse(e); } catch (Throwable e1) { - logger.warn("Failed to send error message back to client for action [" + reg.getAction() + "]", e1); + logger.warn("Failed to send error message back to client for action [{}]", e1, reg.getAction()); logger.warn("Actual Exception", e); } } } diff --git a/core/src/test/resources/indices/bwc/index-2.2.0.zip b/core/src/test/resources/indices/bwc/index-2.2.0.zip index b645084eeef..797ca24f4ed 100644 Binary files a/core/src/test/resources/indices/bwc/index-2.2.0.zip and b/core/src/test/resources/indices/bwc/index-2.2.0.zip differ diff --git a/core/src/test/resources/indices/bwc/repo-2.2.0.zip b/core/src/test/resources/indices/bwc/repo-2.2.0.zip index f895e11fdd7..f2208b734c0 100644 Binary files a/core/src/test/resources/indices/bwc/repo-2.2.0.zip and b/core/src/test/resources/indices/bwc/repo-2.2.0.zip differ diff --git a/core/src/test/resources/org/elasticsearch/index/mapper/all/mapping.json b/core/src/test/resources/org/elasticsearch/index/mapper/all/mapping.json index e0a7bfeba89..80fb091b973 100644 --- a/core/src/test/resources/org/elasticsearch/index/mapper/all/mapping.json +++ b/core/src/test/resources/org/elasticsearch/index/mapper/all/mapping.json @@ -2,7 +2,7 @@ "person":{ "_all":{ "enabled":true, - "omit_norms":true + "norms":false }, "properties":{ "name":{ diff --git a/core/src/test/resources/org/elasticsearch/index/mapper/update/all_mapping_create_index.json b/core/src/test/resources/org/elasticsearch/index/mapper/update/all_mapping_create_index.json index e9604ae458f..67b03178c82 100644 --- a/core/src/test/resources/org/elasticsearch/index/mapper/update/all_mapping_create_index.json +++ b/core/src/test/resources/org/elasticsearch/index/mapper/update/all_mapping_create_index.json @@ -7,7 +7,7 @@ "store_term_vector_offsets": true, "store_term_vector_positions": true, "store_term_vector_payloads": true, - "omit_norms": true, + "norms": false, "analyzer": "standard", "search_analyzer": "whitespace", "similarity": "my_similarity", diff --git a/core/src/test/resources/org/elasticsearch/index/mapper/update/all_mapping_update_with_conflicts.json b/core/src/test/resources/org/elasticsearch/index/mapper/update/all_mapping_update_with_conflicts.json index 6ddde341fc2..6164c3f5ca8 100644 --- a/core/src/test/resources/org/elasticsearch/index/mapper/update/all_mapping_update_with_conflicts.json +++ b/core/src/test/resources/org/elasticsearch/index/mapper/update/all_mapping_update_with_conflicts.json @@ -7,7 +7,7 @@ "store_term_vector_offsets": false, 
"store_term_vector_positions": false, "store_term_vector_payloads": false, - "omit_norms": false, + "norms": true, "analyzer": "whitespace", "search_analyzer": "standard", "similarity": "BM25", diff --git a/dev-tools/create_bwc_index.py b/dev-tools/create_bwc_index.py index c8ade7f866a..361934908ec 100644 --- a/dev-tools/create_bwc_index.py +++ b/dev-tools/create_bwc_index.py @@ -247,6 +247,25 @@ def generate_index(client, version, index_name): } } + mappings['norms'] = { + 'properties': { + 'string_with_norms_disabled': { + 'type': 'string', + 'norms': { + 'enabled': False + } + }, + 'string_with_norms_enabled': { + 'type': 'string', + 'index': 'not_analyzed', + 'norms': { + 'enabled': True, + 'loading': 'eager' + } + } + } + } + mappings['doc'] = { 'properties': { 'string': { diff --git a/distribution/deb/src/main/packaging/init.d/elasticsearch b/distribution/deb/src/main/packaging/init.d/elasticsearch index d0ad04cef7a..078e79a92d1 100755 --- a/distribution/deb/src/main/packaging/init.d/elasticsearch +++ b/distribution/deb/src/main/packaging/init.d/elasticsearch @@ -99,7 +99,7 @@ fi # Define other required variables PID_FILE="$PID_DIR/$NAME.pid" DAEMON=$ES_HOME/bin/elasticsearch -DAEMON_OPTS="-d -p $PID_FILE --default.path.home=$ES_HOME --default.path.logs=$LOG_DIR --default.path.data=$DATA_DIR --default.path.conf=$CONF_DIR" +DAEMON_OPTS="-d -p $PID_FILE -D es.default.path.home=$ES_HOME -D es.default.path.logs=$LOG_DIR -D es.default.path.data=$DATA_DIR -D es.default.path.conf=$CONF_DIR" export ES_HEAP_SIZE export ES_HEAP_NEWSIZE diff --git a/distribution/licenses/commons-cli-1.3.1.jar.sha1 b/distribution/licenses/commons-cli-1.3.1.jar.sha1 deleted file mode 100644 index fc366d027f5..00000000000 --- a/distribution/licenses/commons-cli-1.3.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1303efbc4b181e5a58bf2e967dc156a3132b97c0 diff --git a/distribution/licenses/jopt-simple-4.9.jar.sha1 b/distribution/licenses/jopt-simple-4.9.jar.sha1 new file mode 100644 index 00000000000..b86fa62ac20 --- /dev/null +++ b/distribution/licenses/jopt-simple-4.9.jar.sha1 @@ -0,0 +1 @@ +ee9e9eaa0a35360dcfeac129ff4923215fd65904 \ No newline at end of file diff --git a/distribution/licenses/jopt-simple-LICENSE.txt b/distribution/licenses/jopt-simple-LICENSE.txt new file mode 100644 index 00000000000..85f923a9526 --- /dev/null +++ b/distribution/licenses/jopt-simple-LICENSE.txt @@ -0,0 +1,24 @@ +/* + The MIT License + + Copyright (c) 2004-2015 Paul R. Holser, Jr. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ diff --git a/distribution/licenses/jopt-simple-NOTICE.txt b/distribution/licenses/jopt-simple-NOTICE.txt new file mode 100644 index 00000000000..e69de29bb2d diff --git a/distribution/licenses/lucene-analyzers-common-6.0.0-snapshot-bea235f.jar.sha1 b/distribution/licenses/lucene-analyzers-common-6.0.0-snapshot-bea235f.jar.sha1 deleted file mode 100644 index 74d21bae946..00000000000 --- a/distribution/licenses/lucene-analyzers-common-6.0.0-snapshot-bea235f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3510af19947deadd929123aaf14d69b4bdec759a \ No newline at end of file diff --git a/distribution/licenses/lucene-analyzers-common-6.0.0-snapshot-f0aa4fc.jar.sha1 b/distribution/licenses/lucene-analyzers-common-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..2ed6eb6ef56 --- /dev/null +++ b/distribution/licenses/lucene-analyzers-common-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +cd2388adc4b33c7530bbb8cd386e5c8c5c8e6aca \ No newline at end of file diff --git a/distribution/licenses/lucene-backward-codecs-6.0.0-snapshot-bea235f.jar.sha1 b/distribution/licenses/lucene-backward-codecs-6.0.0-snapshot-bea235f.jar.sha1 deleted file mode 100644 index ee6143bec14..00000000000 --- a/distribution/licenses/lucene-backward-codecs-6.0.0-snapshot-bea235f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -247ad7c17cb7c742d7a9abd5d9980e4fab815178 \ No newline at end of file diff --git a/distribution/licenses/lucene-backward-codecs-6.0.0-snapshot-f0aa4fc.jar.sha1 b/distribution/licenses/lucene-backward-codecs-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..28cdb1db9b1 --- /dev/null +++ b/distribution/licenses/lucene-backward-codecs-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +f5bbdd01b98fab7c18b46e762de3e39221b0c8fc \ No newline at end of file diff --git a/distribution/licenses/lucene-core-6.0.0-snapshot-bea235f.jar.sha1 b/distribution/licenses/lucene-core-6.0.0-snapshot-bea235f.jar.sha1 deleted file mode 100644 index 2d39f84d21e..00000000000 --- a/distribution/licenses/lucene-core-6.0.0-snapshot-bea235f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c0712dbec58abad545646edab67d58f7373f5329 \ No newline at end of file diff --git a/distribution/licenses/lucene-core-6.0.0-snapshot-f0aa4fc.jar.sha1 b/distribution/licenses/lucene-core-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..c304106975b --- /dev/null +++ b/distribution/licenses/lucene-core-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +18ad74518b34af7cfbd6c1e3a408920ff7665501 \ No newline at end of file diff --git a/distribution/licenses/lucene-grouping-6.0.0-snapshot-bea235f.jar.sha1 b/distribution/licenses/lucene-grouping-6.0.0-snapshot-bea235f.jar.sha1 deleted file mode 100644 index a3ce82c8a04..00000000000 --- a/distribution/licenses/lucene-grouping-6.0.0-snapshot-bea235f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7573e3efb12dd16fdc991edaf408877dab20c030 \ No newline at end of file diff --git a/distribution/licenses/lucene-grouping-6.0.0-snapshot-f0aa4fc.jar.sha1 b/distribution/licenses/lucene-grouping-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..a95cc29cc7d --- /dev/null +++ b/distribution/licenses/lucene-grouping-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +dc0b211e31b8f1e0ee3a9e8f9c71b13fa088dabf \ No newline at end of file diff --git a/distribution/licenses/lucene-highlighter-6.0.0-snapshot-bea235f.jar.sha1 b/distribution/licenses/lucene-highlighter-6.0.0-snapshot-bea235f.jar.sha1 deleted file mode 100644 index 9259a2c66c1..00000000000 --- 
a/distribution/licenses/lucene-highlighter-6.0.0-snapshot-bea235f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -96ef0a9a43a5fc99d27bb7e7d61517ee4c7e54a4 \ No newline at end of file diff --git a/distribution/licenses/lucene-highlighter-6.0.0-snapshot-f0aa4fc.jar.sha1 b/distribution/licenses/lucene-highlighter-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..8f57bb02639 --- /dev/null +++ b/distribution/licenses/lucene-highlighter-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +bbd503396c08546f1b9e023e77dbf25bbb052d1c \ No newline at end of file diff --git a/distribution/licenses/lucene-join-6.0.0-snapshot-bea235f.jar.sha1 b/distribution/licenses/lucene-join-6.0.0-snapshot-bea235f.jar.sha1 deleted file mode 100644 index 4959f5f163c..00000000000 --- a/distribution/licenses/lucene-join-6.0.0-snapshot-bea235f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d93de34947d37e31a337cdfed400333588c378d8 \ No newline at end of file diff --git a/distribution/licenses/lucene-join-6.0.0-snapshot-f0aa4fc.jar.sha1 b/distribution/licenses/lucene-join-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..835bac49233 --- /dev/null +++ b/distribution/licenses/lucene-join-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +96fd93d4a4192c42b0d56198b73a25440d4db2f7 \ No newline at end of file diff --git a/distribution/licenses/lucene-memory-6.0.0-snapshot-bea235f.jar.sha1 b/distribution/licenses/lucene-memory-6.0.0-snapshot-bea235f.jar.sha1 deleted file mode 100644 index 5218d0a019e..00000000000 --- a/distribution/licenses/lucene-memory-6.0.0-snapshot-bea235f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9c292930b1828e68f06509944a5346c141d56fd4 \ No newline at end of file diff --git a/distribution/licenses/lucene-memory-6.0.0-snapshot-f0aa4fc.jar.sha1 b/distribution/licenses/lucene-memory-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..1e392d3e246 --- /dev/null +++ b/distribution/licenses/lucene-memory-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +ddd44a319d201ff73cd25be139bd3175226ab5a5 \ No newline at end of file diff --git a/distribution/licenses/lucene-misc-6.0.0-snapshot-bea235f.jar.sha1 b/distribution/licenses/lucene-misc-6.0.0-snapshot-bea235f.jar.sha1 deleted file mode 100644 index 947722edfd3..00000000000 --- a/distribution/licenses/lucene-misc-6.0.0-snapshot-bea235f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -866ed93f48683e877ffa4d9baa1323dcffbc65d7 \ No newline at end of file diff --git a/distribution/licenses/lucene-misc-6.0.0-snapshot-f0aa4fc.jar.sha1 b/distribution/licenses/lucene-misc-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..a21aaef33f5 --- /dev/null +++ b/distribution/licenses/lucene-misc-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +07d943ecdc552632bdca8f2772fd081a02cbf589 \ No newline at end of file diff --git a/distribution/licenses/lucene-queries-6.0.0-snapshot-bea235f.jar.sha1 b/distribution/licenses/lucene-queries-6.0.0-snapshot-bea235f.jar.sha1 deleted file mode 100644 index 6caf86a6b96..00000000000 --- a/distribution/licenses/lucene-queries-6.0.0-snapshot-bea235f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -967d9c2647bdd4d88961747f7436a5a92aa0385b \ No newline at end of file diff --git a/distribution/licenses/lucene-queries-6.0.0-snapshot-f0aa4fc.jar.sha1 b/distribution/licenses/lucene-queries-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..57fb022de53 --- /dev/null +++ b/distribution/licenses/lucene-queries-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +66c72fd979f54480af75d01719ef25da62c0a8b6 \ No newline at end 
of file diff --git a/distribution/licenses/lucene-queryparser-6.0.0-snapshot-bea235f.jar.sha1 b/distribution/licenses/lucene-queryparser-6.0.0-snapshot-bea235f.jar.sha1 deleted file mode 100644 index b3e92d3f168..00000000000 --- a/distribution/licenses/lucene-queryparser-6.0.0-snapshot-bea235f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -981030d83a7504267f3141d7365fad9b46d51465 \ No newline at end of file diff --git a/distribution/licenses/lucene-queryparser-6.0.0-snapshot-f0aa4fc.jar.sha1 b/distribution/licenses/lucene-queryparser-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..5c311c4bd9b --- /dev/null +++ b/distribution/licenses/lucene-queryparser-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +8992204f922fe52af557e691cbfb4c54f92b76bd \ No newline at end of file diff --git a/distribution/licenses/lucene-sandbox-6.0.0-snapshot-bea235f.jar.sha1 b/distribution/licenses/lucene-sandbox-6.0.0-snapshot-bea235f.jar.sha1 deleted file mode 100644 index 7b5176c4963..00000000000 --- a/distribution/licenses/lucene-sandbox-6.0.0-snapshot-bea235f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -707691b1baf22c29020569f5b875d200a4955411 \ No newline at end of file diff --git a/distribution/licenses/lucene-sandbox-6.0.0-snapshot-f0aa4fc.jar.sha1 b/distribution/licenses/lucene-sandbox-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..20f0037ea31 --- /dev/null +++ b/distribution/licenses/lucene-sandbox-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +8565264e00bc43e1226ff0d2e986dbb26d353ce2 \ No newline at end of file diff --git a/distribution/licenses/lucene-spatial-6.0.0-snapshot-bea235f.jar.sha1 b/distribution/licenses/lucene-spatial-6.0.0-snapshot-bea235f.jar.sha1 deleted file mode 100644 index 9df2a16b886..00000000000 --- a/distribution/licenses/lucene-spatial-6.0.0-snapshot-bea235f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -be9e78130a069983f611f484d5b7b87bda0d6370 \ No newline at end of file diff --git a/distribution/licenses/lucene-spatial-6.0.0-snapshot-f0aa4fc.jar.sha1 b/distribution/licenses/lucene-spatial-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..6a909857945 --- /dev/null +++ b/distribution/licenses/lucene-spatial-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +98fc1bb7e005f33c388be66486341ad8168b72eb \ No newline at end of file diff --git a/distribution/licenses/lucene-spatial-extras-6.0.0-snapshot-bea235f.jar.sha1 b/distribution/licenses/lucene-spatial-extras-6.0.0-snapshot-bea235f.jar.sha1 deleted file mode 100644 index 6badc36d361..00000000000 --- a/distribution/licenses/lucene-spatial-extras-6.0.0-snapshot-bea235f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -edeef6ce8a58d5e6a074bebf545918d04e8579e1 \ No newline at end of file diff --git a/distribution/licenses/lucene-spatial-extras-6.0.0-snapshot-f0aa4fc.jar.sha1 b/distribution/licenses/lucene-spatial-extras-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..b741ccd62a7 --- /dev/null +++ b/distribution/licenses/lucene-spatial-extras-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +b5b651b0adbc2f404e091817282dabd7b432c677 \ No newline at end of file diff --git a/distribution/licenses/lucene-spatial3d-6.0.0-snapshot-bea235f.jar.sha1 b/distribution/licenses/lucene-spatial3d-6.0.0-snapshot-bea235f.jar.sha1 deleted file mode 100644 index 480ae590aed..00000000000 --- a/distribution/licenses/lucene-spatial3d-6.0.0-snapshot-bea235f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d86a7ba859576bdcee1dacd8f407ccf71f982c60 \ No newline at end of file diff --git 
a/distribution/licenses/lucene-spatial3d-6.0.0-snapshot-f0aa4fc.jar.sha1 b/distribution/licenses/lucene-spatial3d-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..512e4b7b592 --- /dev/null +++ b/distribution/licenses/lucene-spatial3d-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +334e194bf83c75f0ae165e3e72b6fa35c5d636c5 \ No newline at end of file diff --git a/distribution/licenses/lucene-suggest-6.0.0-snapshot-bea235f.jar.sha1 b/distribution/licenses/lucene-suggest-6.0.0-snapshot-bea235f.jar.sha1 deleted file mode 100644 index 7835298c4a2..00000000000 --- a/distribution/licenses/lucene-suggest-6.0.0-snapshot-bea235f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a3860de6502576f142dc948eb2005fa4dc0c27c5 \ No newline at end of file diff --git a/distribution/licenses/lucene-suggest-6.0.0-snapshot-f0aa4fc.jar.sha1 b/distribution/licenses/lucene-suggest-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..3d2cf156d40 --- /dev/null +++ b/distribution/licenses/lucene-suggest-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +89c46e9601cf8fb9acf77398838f8710c9e44053 \ No newline at end of file diff --git a/distribution/rpm/src/main/packaging/init.d/elasticsearch b/distribution/rpm/src/main/packaging/init.d/elasticsearch index c56944b7c3c..1132fca4f9e 100644 --- a/distribution/rpm/src/main/packaging/init.d/elasticsearch +++ b/distribution/rpm/src/main/packaging/init.d/elasticsearch @@ -117,7 +117,7 @@ start() { cd $ES_HOME echo -n $"Starting $prog: " # if not running, start it up here, usually something like "daemon $exec" - daemon --user $ES_USER --pidfile $pidfile $exec -p $pidfile -d -Des.default.path.home=$ES_HOME -Des.default.path.logs=$LOG_DIR -Des.default.path.data=$DATA_DIR -Des.default.path.conf=$CONF_DIR + daemon --user $ES_USER --pidfile $pidfile $exec -p $pidfile -d -D es.default.path.home=$ES_HOME -D es.default.path.logs=$LOG_DIR -D es.default.path.data=$DATA_DIR -D es.default.path.conf=$CONF_DIR retval=$? echo [ $retval -eq 0 ] && touch $lockfile diff --git a/docs/plugins/authors.asciidoc b/docs/plugins/authors.asciidoc index d0a606987ae..af3710f7e2d 100644 --- a/docs/plugins/authors.asciidoc +++ b/docs/plugins/authors.asciidoc @@ -26,9 +26,7 @@ https://github.com/elastic/elasticsearch/blob/master/buildSrc/src/main/resources Either fill in this template yourself (see https://github.com/lmenezes/elasticsearch-kopf/blob/master/plugin-descriptor.properties[elasticsearch-kopf] as an example) or, if you are using Elasticsearch's Gradle build system, you -can fill in the necessary values in the `build.gradle` file for your plugin. For -instance, see -https://github.com/elastic/elasticsearch/blob/master/plugins/site-example/build.gradle[`/plugins/site-example/build.gradle`]. +can fill in the necessary values in the `build.gradle` file for your plugin. [float] ==== Mandatory elements for plugins diff --git a/docs/plugins/mapper-attachments.asciidoc b/docs/plugins/mapper-attachments.asciidoc index 63742b518b5..e65ab9ff64a 100644 --- a/docs/plugins/mapper-attachments.asciidoc +++ b/docs/plugins/mapper-attachments.asciidoc @@ -405,41 +405,3 @@ It gives back: } } -------------------------- - -[[mapper-attachments-standalone]] -==== Stand alone runner - -If you want to run some tests within your IDE, you can use `StandaloneRunner` class. 
-It accepts arguments: - -* `-u file://URL/TO/YOUR/DOC` -* `--size` set extracted size (default to mapper attachment size) -* `BASE64` encoded binary - -Example: - -[source,sh] --------------------------- -StandaloneRunner BASE64Text -StandaloneRunner -u /tmp/mydoc.pdf -StandaloneRunner -u /tmp/mydoc.pdf --size 1000000 --------------------------- - -It produces something like: - -[source,text] --------------------------- -## Extracted text ---------------------- BEGIN ----------------------- -This is the extracted text ----------------------- END ------------------------ -## Metadata -- author: null -- content_length: null -- content_type: application/pdf -- date: null -- keywords: null -- language: null -- name: null -- title: null --------------------------- diff --git a/docs/reference/analysis/tokenfilters/compound-word-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/compound-word-tokenfilter.asciidoc index f77c47d156e..1268727b2ef 100644 --- a/docs/reference/analysis/tokenfilters/compound-word-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/compound-word-tokenfilter.asciidoc @@ -27,7 +27,8 @@ use. For languages like German they are quite good. XML based hyphenation grammar files can be found in the http://offo.sourceforge.net/hyphenation/#FOP+XML+Hyphenation+Patterns[Objects For Formatting Objects] -(OFFO) Sourceforge project. You can download http://downloads.sourceforge.net/offo/offo-hyphenation.zip[offo-hyphenation.zip] +(OFFO) Sourceforge project. Currently only FOP v1.2 compatible hyphenation files +are supported. You can download https://sourceforge.net/projects/offo/files/offo-hyphenation/1.2/offo-hyphenation_v1.2.zip/download[offo-hyphenation_v1.2.zip] directly and look in the `offo-hyphenation/hyph/` directory. Credits for the hyphenation code go to the Apache FOP project . diff --git a/docs/reference/migration/index.asciidoc b/docs/reference/migration/index.asciidoc index 57401cb01d7..0dda43da713 100644 --- a/docs/reference/migration/index.asciidoc +++ b/docs/reference/migration/index.asciidoc @@ -8,26 +8,15 @@ your application from one version of Elasticsearch to another. As a general rule: -* Migration between major versions -- e.g. `1.x` to `2.x` -- +* Migration between minor versions -- e.g. `5.x` to `5.y` -- can be + performed by <>. + +* Migration between consecutive major versions -- e.g. `2.x` to `5.x` -- requires a <>. -* Migration between minor versions -- e.g. `1.x` to `1.y` -- can be - performed by <>. +* Migration between non-consecutive major versions -- e.g. `1.x` to `5.x` -- + is not supported. See <> for more info. -- include::migrate_5_0.asciidoc[] - -include::migrate_2_3.asciidoc[] - -include::migrate_2_2.asciidoc[] - -include::migrate_2_1.asciidoc[] - -include::migrate_2_0.asciidoc[] - -include::migrate_1_6.asciidoc[] - -include::migrate_1_4.asciidoc[] - -include::migrate_1_0.asciidoc[] diff --git a/docs/reference/migration/migrate_1_0.asciidoc b/docs/reference/migration/migrate_1_0.asciidoc deleted file mode 100644 index 1e917c4a0d9..00000000000 --- a/docs/reference/migration/migrate_1_0.asciidoc +++ /dev/null @@ -1,372 +0,0 @@ -[[breaking-changes-1.0]] -== Breaking changes in 1.0 - -This section discusses the changes that you need to be aware of when migrating -your application to Elasticsearch 1.0. - -=== System and settings - -* Elasticsearch now runs in the foreground by default. There is no more `-f` - flag on the command line. 
Instead, to run elasticsearch as a daemon, use - the `-d` flag: - -[source,sh] ---------------- -./bin/elasticsearch -d ---------------- - -* Command line settings can now be passed without the `-Des.` prefix, for - instance: - -[source,sh] ---------------- -./bin/elasticsearch --node.name=search_1 --cluster.name=production ---------------- - -* Elasticsearch on 64 bit Linux now uses <> by default. Make - sure that you set <> to a sufficiently high - number. The RPM and Debian packages default this value to `262144`. - -* The RPM and Debian packages no longer start Elasticsearch by default. - -* The `cluster.routing.allocation` settings (`disable_allocation`, - `disable_new_allocation` and `disable_replica_location`) have been - <>: -+ -[source,yaml] ---------------- -cluster.routing.allocation.enable: all|primaries|new_primaries|none ---------------- - -=== Stats and Info APIs - -The <>, <>, -<> and <> -APIs have all been changed to make their format more RESTful and less clumsy. - -For instance, if you just want the `nodes` section of the `cluster_state`, -instead of: - -[source,sh] ---------------- -GET /_cluster/state?filter_metadata&filter_routing_table&filter_blocks ---------------- - -you now use: - -[source,sh] ---------------- -GET /_cluster/state/nodes ---------------- - -Similarly for the `nodes_stats` API, if you want the `transport` and `http` -metrics only, instead of: - -[source,sh] ---------------- -GET /_nodes/stats?clear&transport&http ---------------- - -you now use: - -[source,sh] ---------------- -GET /_nodes/stats/transport,http ---------------- - -See the links above for full details. - - -=== Indices APIs - -The `mapping`, `alias`, `settings`, and `warmer` index APIs are all similar -but there are subtle differences in the order of the URL and the response -body. For instance, adding a mapping and a warmer look slightly different: - -[source,sh] ---------------- -PUT /{index}/{type}/_mapping -PUT /{index}/_warmer/{name} ---------------- - -These URLs have been unified as: - -[source,sh] ---------------- -PUT /{indices}/_mapping/{type} -PUT /{indices}/_alias/{name} -PUT /{indices}/_warmer/{name} - -GET /{indices}/_mapping/{types} -GET /{indices}/_alias/{names} -GET /{indices}/_settings/{names} -GET /{indices}/_warmer/{names} - -DELETE /{indices}/_mapping/{types} -DELETE /{indices}/_alias/{names} -DELETE /{indices}/_warmer/{names} ---------------- - -All of the `{indices}`, `{types}` and `{names}` parameters can be replaced by: - - * `_all`, `*` or blank (ie left out altogether), all of which mean ``all'' - * wildcards like `test*` - * comma-separated lists: `index_1,test_*` - -The only exception is `DELETE` which doesn't accept blank (missing) -parameters. If you want to delete something, you should be specific. - -Similarly, the return values for `GET` have been unified with the following -rules: - -* Only return values that exist. If you try to `GET` a mapping which doesn't - exist, then the result will be an empty object: `{}`. We no longer throw a - `404` if the requested mapping/warmer/alias/setting doesn't exist. - -* The response format always has the index name, then the section, then the - element name, for instance: -+ -[source,js] ---------------- -{ - "my_index": { - "mappings": { - "my_type": {...} - } - } -} ---------------- -+ -This is a breaking change for the `get_mapping` API. - -In the future we will also provide plural versions to allow putting multiple mappings etc in a single request. 
- -See <>, <>, <>, -<>, <>, -`warmers`, and <> for more details. - -=== Index request - -Previously a document could be indexed as itself, or wrapped in an outer -object which specified the `type` name: - -[source,js] ---------------- -PUT /my_index/my_type/1 -{ - "my_type": { - ... doc fields ... - } -} ---------------- - -This led to some ambiguity when a document also included a field with the same -name as the `type`. We no longer accept the outer `type` wrapper, but this -behaviour can be reenabled on an index-by-index basis with the setting: -`index.mapping.allow_type_wrapper`. - -=== Search requests - -While the `search` API takes a top-level `query` parameter, the -<>, `delete-by-query` and -<> requests expected the whole body to be a -query. These now _require_ a top-level `query` parameter: - -[source,js] ---------------- -GET /_count -{ - "query": { - "match": { - "title": "Interesting stuff" - } - } -} ---------------- - -Also, the top-level `filter` parameter in search has been renamed to -<>, to indicate that it should not -be used as the primary way to filter search results (use a -<> instead), but only to filter -results AFTER aggregations have been calculated. - -This example counts the top colors in all matching docs, but only returns docs -with color `red`: - -[source,js] ---------------- -GET /_search -{ - "query": { - "match_all": {} - }, - "aggs": { - "colors": { - "terms": { "field": "color" } - } - }, - "post_filter": { - "term": { - "color": "red" - } - } -} ---------------- - -=== Multi-fields - -Multi-fields are dead! Long live multi-fields! Well, the field type -`multi_field` has been removed. Instead, any of the core field types -(excluding `object` and `nested`) now accept a `fields` parameter. It's the -same thing, but nicer. Instead of: - -[source,js] ---------------- -"title": { - "type": "multi_field", - "fields": { - "title": { "type": "string" }, - "raw": { "type": "string", "index": "not_analyzed" } - } -} ---------------- - -you can now write: - -[source,js] ---------------- -"title": { - "type": "string", - "fields": { - "raw": { "type": "string", "index": "not_analyzed" } - } -} ---------------- - -Existing multi-fields will be upgraded to the new format automatically. - -Also, instead of having to use the arcane `path` and `index_name` parameters -in order to index multiple fields into a single ``custom +_all+ field'', you -can now use the <>. - -=== Stopwords - -Previously, the <> and -<> analyzers used the list of English stopwords -by default, which caused some hard to debug indexing issues. Now they are set to -use the empty stopwords list (ie `_none_`) instead. - -=== Dates without years - -When dates are specified without a year, for example: `Dec 15 10:00:00` they -are treated as dates in 2000 during indexing and range searches... except for -the upper included bound `lte` where they were treated as dates in 1970! Now, -all https://github.com/elastic/elasticsearch/issues/4451[dates without years] -use `1970` as the default. - -=== Parameters - -* Geo queries used to use `miles` as the default unit. And we - http://en.wikipedia.org/wiki/Mars_Climate_Orbiter[all know what - happened at NASA] because of that decision. The new default unit is - https://github.com/elastic/elasticsearch/issues/4515[`meters`]. - -* For all queries that support _fuzziness_, the `min_similarity`, `fuzziness` - and `edit_distance` parameters have been unified as the single parameter - `fuzziness`. See <> for details of accepted values. 
- -* The `ignore_missing` parameter has been replaced by the `expand_wildcards`, - `ignore_unavailable` and `allow_no_indices` parameters, all of which have - sensible defaults. See <> for more. - -* An index name (or pattern) is now required for destructive operations like - deleting indices: -+ -[source,sh] ---------------- -# v0.90 - delete all indices: -DELETE / - -# v1.0 - delete all indices: -DELETE /_all -DELETE /* ---------------- -+ -Setting `action.destructive_requires_name` to `true` provides further safety -by disabling wildcard expansion on destructive actions. - -=== Return values - -* The `ok` return value has been removed from all response bodies as it added - no useful information. - -* The `found`, `not_found` and `exists` return values have been unified as - `found` on all relevant APIs. - -* Field values, in response to the <> - parameter, are now always returned as arrays. A field could have single or - multiple values, which meant that sometimes they were returned as scalars - and sometimes as arrays. By always returning arrays, this simplifies user - code. The only exception to this rule is when `fields` is used to retrieve - metadata like the `routing` value, which are always singular. Metadata - fields are always returned as scalars. -+ -The `fields` parameter is intended to be used for retrieving stored fields, -rather than for fields extracted from the `_source`. That means that it can no -longer be used to return whole objects and it no longer accepts the -`_source.fieldname` format. For these you should use the -<> -parameters instead. - -* Settings, like `index.analysis.analyzer.default` are now returned as proper - nested JSON objects, which makes them easier to work with programmatically: -+ -[source,js] ---------------- -{ - "index": { - "analysis": { - "analyzer": { - "default": xxx - } - } - } -} ---------------- -+ -You can choose to return them in flattened format by passing `?flat_settings` -in the query string. - -* The <> API no longer supports the text response - format, but does support JSON and YAML. - -=== Deprecations - -* The `text` query has been removed. Use the - <> query instead. - -* The `field` query has been removed. Use the - <> query instead. - -* Per-document boosting with the `_boost` field has - been removed. You can use the - <> instead. - -* The `path` parameter in mappings has been deprecated. Use the - <> parameter instead. - -* The `custom_score` and `custom_boost_score` is no longer supported. You can - use <> instead. - -=== Percolator - -The percolator has been redesigned and because of this the dedicated `_percolator` index is no longer used by the percolator, -but instead the percolator works with a dedicated `.percolator` type. Read the http://www.elastic.co/blog/percolator-redesign-blog-post[redesigned percolator] -blog post for the reasons why the percolator has been redesigned. - -Elasticsearch will *not* delete the `_percolator` index when upgrading, only the percolate api will not use the queries -stored in the `_percolator` index. In order to use the already stored queries, you can just re-index the queries from the -`_percolator` index into any index under the reserved `.percolator` type. The format in which the percolate queries -were stored has *not* been changed. So a simple script that does a scan search to retrieve all the percolator queries -and then does a bulk request into another index should be sufficient. 
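The deleted percolator notes above describe the scan-search-plus-bulk migration only in prose. A minimal sketch of that approach, assuming the 1.x-era Java client API (`SearchType.SCAN` and `prepareSearchScroll`; in the old `_percolator` index the document type named the target index, as this sketch assumes):

[source,java]
----
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.search.SearchHit;

// Hedged sketch, not part of this change set: copy every stored query from
// the legacy "_percolator" index into the reserved ".percolator" type.
void copyPercolatorQueries(Client client) {
    SearchResponse scroll = client.prepareSearch("_percolator")
            .setSearchType(SearchType.SCAN) // scan returns no hits up front
            .setScroll(TimeValue.timeValueMinutes(1))
            .setSize(100)
            .get();
    while (true) {
        scroll = client.prepareSearchScroll(scroll.getScrollId())
                .setScroll(TimeValue.timeValueMinutes(1))
                .get();
        if (scroll.getHits().getHits().length == 0) {
            break; // all stored queries have been copied
        }
        BulkRequestBuilder bulk = client.prepareBulk();
        for (SearchHit hit : scroll.getHits()) {
            // Assumption: the old type held the target index name.
            bulk.add(client.prepareIndex(hit.getType(), ".percolator", hit.getId())
                    .setSource(hit.getSourceRef()));
        }
        bulk.get(); // a real migration should check the response for failures
    }
}
----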
diff --git a/docs/reference/migration/migrate_1_4.asciidoc b/docs/reference/migration/migrate_1_4.asciidoc deleted file mode 100644 index c20504bbddf..00000000000 --- a/docs/reference/migration/migrate_1_4.asciidoc +++ /dev/null @@ -1,92 +0,0 @@ -[[breaking-changes-1.4]] -== Breaking changes in 1.4 - -This section discusses the changes that you need to be aware of when migrating -your application from Elasticsearch 1.x to Elasticsearch 1.4. - -[float] -=== Percolator - -In indices created with version `1.4.0` or later, percolation queries can only -refer to fields that already exist in the mappings in that index. There are -two ways to make sure that a field mapping exist: - -* Add or update a mapping via the <> or - <> apis. -* Percolate a document before registering a query. Percolating a document can - add field mappings dynamically, in the same way as happens when indexing a - document. - -[float] -=== Aliases - -<> can include <> which -are automatically applied to any search performed via the alias. -<> created with version `1.4.0` or later can only -refer to field names which exist in the mappings of the index (or indices) -pointed to by the alias. - -Add or update a mapping via the <> or -<> apis. - -[float] -=== Indices APIs - -The get warmer api will return a section for `warmers` even if there are -no warmers. This ensures that the following two examples are equivalent: - -[source,js] --------------------------------------------------- -curl -XGET 'http://localhost:9200/_all/_warmers' - -curl -XGET 'http://localhost:9200/_warmers' --------------------------------------------------- - -The <> will return a section for `aliases` even if there are -no aliases. This ensures that the following two examples are equivalent: - -[source,js] --------------------------------------------------- -curl -XGET 'http://localhost:9200/_all/_aliases' - -curl -XGET 'http://localhost:9200/_aliases' --------------------------------------------------- - -The <> will return a section for `mappings` even if there are -no mappings. This ensures that the following two examples are equivalent: - -[source,js] --------------------------------------------------- -curl -XGET 'http://localhost:9200/_all/_mappings' - -curl -XGET 'http://localhost:9200/_mappings' --------------------------------------------------- - -[float] -=== Bulk UDP - -Bulk UDP has been deprecated and will be removed in 2.0. -You should use <> instead. -Each cluster must have an elected master node in order to be fully operational. Once a node loses its elected master -node it will reject some or all operations. - -[float] -=== Zen discovery - -On versions before `1.4.0.Beta1` all operations are rejected when a node loses its elected master. From `1.4.0.Beta1` -only write operations will be rejected by default. Read operations will still be served based on the information available -to the node, which may result in being partial and possibly also stale. If the default is undesired then the -pre `1.4.0.Beta1` behaviour can be enabled, see: <> - -[float] -=== More Like This Field - -The More Like This Field query has been deprecated in favor of the <> -restrained set to a specific `field`. It will be removed in 2.0. - -[float] -=== MVEL is deprecated - -Groovy is the new default scripting language in Elasticsearch, and is enabled in `sandbox` mode -by default. 
MVEL has been removed from core, but is available as a plugin: -https://github.com/elastic/elasticsearch-lang-mvel diff --git a/docs/reference/migration/migrate_1_6.asciidoc b/docs/reference/migration/migrate_1_6.asciidoc deleted file mode 100644 index 9540d3b6759..00000000000 --- a/docs/reference/migration/migrate_1_6.asciidoc +++ /dev/null @@ -1,17 +0,0 @@ -[[breaking-changes-1.6]] -== Breaking changes in 1.6 - -This section discusses the changes that you need to be aware of when migrating -your application from Elasticsearch 1.x to Elasticsearch 1.6. - -[float] -=== More Like This API - -The More Like This API query has been deprecated and will be removed in 2.0. Instead use the <>. - -[float] -=== `top_children` query - -The `top_children` query has been deprecated and will be removed in 2.0. Instead the `has_child` query should be used. -The `top_children` query isn't always faster than the `has_child` query and the `top_children` query is often inaccurate. -The total hits and any aggregations in the same search request will likely be off. diff --git a/docs/reference/migration/migrate_2_0.asciidoc b/docs/reference/migration/migrate_2_0.asciidoc deleted file mode 100644 index adf12e7da5c..00000000000 --- a/docs/reference/migration/migrate_2_0.asciidoc +++ /dev/null @@ -1,73 +0,0 @@ -[[breaking-changes-2.0]] -== Breaking changes in 2.0 - -This section discusses the changes that you need to be aware of when migrating -your application to Elasticsearch 2.0. - -[float] -=== Indices created before 0.90 - -Elasticsearch 2.0 can read indices created in version 0.90 and above. If any -of your indices were created before 0.90 you will need to upgrade to the -latest 1.x version of Elasticsearch first, in order to upgrade your indices or -to delete the old indices. Elasticsearch will not start in the presence of old -indices. - -[float] -=== Elasticsearch migration plugin - -We have provided the https://github.com/elastic/elasticsearch-migration[Elasticsearch migration plugin] -to help you detect any issues that you may have when upgrading to -Elasticsearch 2.0. Please install and run the plugin *before* upgrading. - -[float] -=== Also see - -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> - -include::migrate_2_0/removals.asciidoc[] - -include::migrate_2_0/network.asciidoc[] - -include::migrate_2_0/striping.asciidoc[] - -include::migrate_2_0/mapping.asciidoc[] - -include::migrate_2_0/crud.asciidoc[] - -include::migrate_2_0/query_dsl.asciidoc[] - -include::migrate_2_0/search.asciidoc[] - -include::migrate_2_0/aggs.asciidoc[] - -include::migrate_2_0/parent_child.asciidoc[] - -include::migrate_2_0/scripting.asciidoc[] - -include::migrate_2_0/index_apis.asciidoc[] - -include::migrate_2_0/snapshot_restore.asciidoc[] - -include::migrate_2_0/packaging.asciidoc[] - -include::migrate_2_0/settings.asciidoc[] - -include::migrate_2_0/stats.asciidoc[] - -include::migrate_2_0/java.asciidoc[] diff --git a/docs/reference/migration/migrate_2_0/aggs.asciidoc b/docs/reference/migration/migrate_2_0/aggs.asciidoc deleted file mode 100644 index 1351b4cb4a3..00000000000 --- a/docs/reference/migration/migrate_2_0/aggs.asciidoc +++ /dev/null @@ -1,70 +0,0 @@ -[[breaking_20_aggregation_changes]] -=== Aggregation changes - -==== Min doc count defaults to zero - -Both the `histogram` and `date_histogram` aggregations now have a default -`min_doc_count` of `0` instead of `1`. 
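The deleted note above only states the new default. As a short hedged sketch (against the 2.x Java `AggregationBuilders` API; `my_index` and `price` are illustrative names) of pinning the old behaviour back explicitly:

[source,java]
----
// Hedged sketch: request the pre-2.0 behaviour explicitly with minDocCount(1);
// under the new default of 0, empty buckets are returned as well.
SearchResponse response = client.prepareSearch("my_index")
        .addAggregation(AggregationBuilders.histogram("prices")
                .field("price")
                .interval(10)
                .minDocCount(1))
        .get();
----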
- -==== Timezone for date field - -Specifying the `time_zone` parameter in queries or aggregations on fields of -type `date` must now be either an ISO 8601 UTC offset, or a timezone id. For -example, the value `+1:00` must now be written as `+01:00`. - -==== Time zones and offsets - -The `histogram` and the `date_histogram` aggregation now support a simplified -`offset` option that replaces the previous `pre_offset` and `post_offset` -rounding options. Instead of having to specify two separate offset shifts of -the underlying buckets, the `offset` option moves the bucket boundaries in -positive or negative direction depending on its argument. - -The `date_histogram` options for `pre_zone` and `post_zone` are replaced by -the `time_zone` option. The behavior of `time_zone` is equivalent to the -former `pre_zone` option. Setting `time_zone` to a value like "+01:00" now -will lead to the bucket calculations being applied in the specified time zone. -The `key` is returned as the timestamp in UTC, but the `key_as_string` is -returned in the time zone specified. - -In addition to this, the `pre_zone_adjust_large_interval` is removed because -we now always return dates and bucket keys in UTC. - -==== Including/excluding terms - -`include`/`exclude` filtering on the `terms` aggregation now uses the same -syntax as <> instead of the Java regular -expression syntax. While simple regexps should still work, more complex ones -might need some rewriting. Also, the `flags` parameter is no longer supported. - -==== Boolean fields - -Aggregations on `boolean` fields will now return `0` and `1` as keys, and -`"true"` and `"false"` as string keys. See <> for more -information. - - -==== Java aggregation classes - -The `date_histogram` aggregation now returns a `Histogram` object in the -response, and the `DateHistogram` class has been removed. Similarly the -`date_range`, `ipv4_range`, and `geo_distance` aggregations all return a -`Range` object in the response, and the `IPV4Range`, `DateRange`, and -`GeoDistance` classes have been removed. - -The motivation for this is to have a single response API for the Range and -Histogram aggregations regardless of the type of data being queried. To -support this some changes were made in the `MultiBucketAggregation` interface -which applies to all bucket aggregations: - -* The `getKey()` method now returns `Object` instead of `String`. The actual - object type returned depends on the type of aggregation requested (e.g. the - `date_histogram` will return a `DateTime` object for this method whereas a - `histogram` will return a `Number`). -* A `getKeyAsString()` method has been added to return the String - representation of the key. -* All other `getKeyAsX()` methods have been removed. -* The `getBucketAsKey(String)` methods have been removed on all aggregations - except the `filters` and `terms` aggregations. - - diff --git a/docs/reference/migration/migrate_2_0/crud.asciidoc b/docs/reference/migration/migrate_2_0/crud.asciidoc deleted file mode 100644 index ef3ba93e67e..00000000000 --- a/docs/reference/migration/migrate_2_0/crud.asciidoc +++ /dev/null @@ -1,130 +0,0 @@ -[[breaking_20_crud_and_routing_changes]] -=== CRUD and routing changes - -==== Explicit custom routing - -Custom `routing` values can no longer be extracted from the document body, but -must be specified explicitly as part of the query string, or in the metadata -line in the <> API. See <> for an -example. 
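To make the explicit-routing point above concrete, a hedged Java-client sketch (`user123` and the index, type, and document names are illustrative):

[source,java]
----
// Hedged sketch: the routing value now travels with the request itself
// rather than being extracted from the document body.
client.prepareIndex("my_index", "my_type", "1")
        .setRouting("user123") // explicit routing, formerly readable from the body
        .setSource("{\"text\":\"quick brown fox\"}")
        .get();
----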
- -==== Routing hash function - -The default hash function that is used for routing has been changed from -`djb2` to `murmur3`. This change should be transparent unless you relied on -very specific properties of `djb2`. This will help ensure a better balance of -the document counts between shards. - -In addition, the following routing-related node settings have been deprecated: - -`cluster.routing.operation.hash.type`:: - - This was an undocumented setting that allowed to configure which hash function - to use for routing. `murmur3` is now enforced on new indices. - -`cluster.routing.operation.use_type`:: - - This was an undocumented setting that allowed to take the `_type` of the - document into account when computing its shard (default: `false`). `false` is - now enforced on new indices. - -==== Delete API with custom routing - -The delete API used to be broadcast to all shards in the index which meant -that, when using custom routing, the `routing` parameter was optional. Now, -the delete request is forwarded only to the shard holding the document. If you -are using custom routing then you should specify the `routing` value when -deleting a document, just as is already required for the `index`, `create`, -and `update` APIs. - -To make sure that you never forget a routing value, make routing required with -the following mapping: - -[source,js] ---------------------------- -PUT my_index -{ - "mappings": { - "my_type": { - "_routing": { - "required": true - } - } - } -} ---------------------------- - -==== All stored meta-fields returned by default - -Previously, meta-fields like `_routing`, `_timestamp`, etc would only be -included in a GET request if specifically requested with the `fields` -parameter. Now, all meta-fields which have stored values will be returned by -default. Additionally, they are now returned at the top level (along with -`_index`, `_type`, and `_id`) instead of in the `fields` element. - -For instance, the following request: - -[source,sh] ---------------- -GET /my_index/my_type/1 ---------------- - -might return: - -[source,js] ---------------- -{ - "_index": "my_index", - "_type": "my_type", - "_id": "1", - "_timestamp": 10000000, <1> - "_source": { - "foo" : [ "bar" ] - } -} ---------------- -<1> The `_timestamp` is returned by default, and at the top level. - - -==== Async replication - -The `replication` parameter has been removed from all CRUD operations -(`index`, `create`, `update`, `delete`, `bulk`) as it interfered with the -<> feature. These operations are now -synchronous only and a request will only return once the changes have been -replicated to all active shards in the shard group. - -Instead, use more client processes to send more requests in parallel. - -==== Documents must be specified without a type wrapper - -Previously, the document body could be wrapped in another object with the name -of the `type`: - -[source,js] --------------------------- -PUT my_index/my_type/1 -{ - "my_type": { <1> - "text": "quick brown fox" - } -} --------------------------- -<1> This `my_type` wrapper is not part of the document itself, but represents the document type. - -This feature was deprecated before but could be reenabled with the -`mapping.allow_type_wrapper` index setting. This setting is no longer -supported. 
The above document should be indexed as follows: - -[source,js] --------------------------- -PUT my_index/my_type/1 -{ - "text": "quick brown fox" -} --------------------------- - -==== Term Vectors API - -Usage of `/_termvector` is deprecated in favor of `/_termvectors`. - diff --git a/docs/reference/migration/migrate_2_0/index_apis.asciidoc b/docs/reference/migration/migrate_2_0/index_apis.asciidoc deleted file mode 100644 index c177a887866..00000000000 --- a/docs/reference/migration/migrate_2_0/index_apis.asciidoc +++ /dev/null @@ -1,43 +0,0 @@ -[[breaking_20_index_api_changes]] -=== Index API changes - -==== Index aliases - - -Fields used in alias filters no longer have to exist in the mapping at alias -creation time. Previously, alias filters were parsed at alias creation time -and the parsed form was cached in memory. Now, alias filters are parsed at -request time and the fields in filters are resolved from the current mapping. - -This also means that index aliases now support `has_parent` and `has_child` -queries. - -The <> will now throw an exception if no -matching aliases are found. This change brings the defaults for this API in -line with the other Indices APIs. The <> options can be used on a -request to change this behavior. - -==== File based index templates - -Index templates can no longer be configured on disk. Use the -<> API instead. - -==== Analyze API changes - - -The Analyze API now returns the `position` of the first token as `0` -instead of `1`. - -The `prefer_local` parameter has been removed. The `_analyze` API is a light -operation and the caller shouldn't be concerned about whether it executes on -the node that receives the request or another node. - -The `text()` method on `AnalyzeRequest` now returns `String[]` instead of -`String`. - -==== Removed `id_cache` from clear cache api - -The <> API no longer supports the `id_cache` -option. Instead, use the `fielddata` option to clear the cache for the -`_parent` field. - diff --git a/docs/reference/migration/migrate_2_0/java.asciidoc b/docs/reference/migration/migrate_2_0/java.asciidoc deleted file mode 100644 index b2f5ee63e0d..00000000000 --- a/docs/reference/migration/migrate_2_0/java.asciidoc +++ /dev/null @@ -1,147 +0,0 @@ -[[breaking_20_java_api_changes]] -=== Java API changes - -==== Transport API construction - -The `TransportClient` construction code has changed, it now uses the builder -pattern. Instead of: - -[source,java] --------------------------------------------------- -Settings settings = Settings.settingsBuilder() - .put("cluster.name", "myClusterName").build(); -Client client = new TransportClient(settings); --------------------------------------------------- - -Use the following: - -[source,java] --------------------------------------------------- -Settings settings = Settings.settingsBuilder() - .put("cluster.name", "myClusterName").build(); -Client client = TransportClient.builder().settings(settings).build(); --------------------------------------------------- - -The transport client also no longer supports loading settings from config files. 
-If you have a config file, you can load it into settings yourself before -constructing the transport client: - -[source,java] --------------------------------------------------- -Settings settings = Settings.settingsBuilder() - .loadFromPath(pathToYourSettingsFile).build(); -Client client = TransportClient.builder().settings(settings).build(); --------------------------------------------------- - -==== Exception are only thrown on total failure - -Previously, many APIs would throw an exception if any shard failed to execute -the request. Now the exception is only thrown if all shards fail the request. -The responses for these APIs will always have a `getShardFailures` method that -you can and should check for failures. - - -==== IndexMissingException removed. - -Use `IndexNotFoundException` instead. - - -==== Automatically thread client listeners - -Previously, the user had to set request listener threads to `true` when on the -client side in order not to block IO threads on heavy operations. This proved -to be very trappy for users, and ended up creating problems that are very hard -to debug. - -In 2.0, Elasticsearch automatically threads listeners that are used from the -client when the client is a node client or a transport client. Threading can -no longer be manually set. - - -==== Query/filter refactoring - -`org.elasticsearch.index.queries.FilterBuilders` has been removed as part of the merge of -queries and filters. These filters are now available in `QueryBuilders` with the same name. -All methods that used to accept a `FilterBuilder` now accept a `QueryBuilder` instead. - -In addition some query builders have been removed or renamed: - -* `commonTerms(...)` renamed with `commonTermsQuery(...)` -* `queryString(...)` renamed with `queryStringQuery(...)` -* `simpleQueryString(...)` renamed with `simpleQueryStringQuery(...)` -* `textPhrase(...)` removed -* `textPhrasePrefix(...)` removed -* `textPhrasePrefixQuery(...)` removed -* `filtered(...)` removed. Use `filteredQuery(...)` instead. -* `inQuery(...)` removed. - -==== GetIndexRequest - -`GetIndexRequest.features()` now returns an array of Feature Enums instead of an array of String values. - -The following deprecated methods have been removed: - -* `GetIndexRequest.addFeatures(String[])` - Use - `GetIndexRequest.addFeatures(Feature[])` instead - -* `GetIndexRequest.features(String[])` - Use - `GetIndexRequest.features(Feature[])` instead. - -* `GetIndexRequestBuilder.addFeatures(String[])` - Use - `GetIndexRequestBuilder.addFeatures(Feature[])` instead. - -* `GetIndexRequestBuilder.setFeatures(String[])` - Use - `GetIndexRequestBuilder.setFeatures(Feature[])` instead. - - -==== BytesQueryBuilder removed - -The redundant BytesQueryBuilder has been removed in favour of the -WrapperQueryBuilder internally. - -==== TermsQueryBuilder execution removed - -The `TermsQueryBuilder#execution` method has been removed as it has no effect, it is ignored by the - corresponding parser. - -==== ImmutableSettings removed - -Use `Settings.builder()` instead of `ImmutableSettings.builder()`. - -==== InetSocketTransportAddress removed - -Use `InetSocketTransportAddress(InetSocketAddress address)` instead of `InetSocketTransportAddress(String, int)`. -You can create an InetSocketAddress instance with `InetSocketAddress(String, int)`. 
For example: - -[source,java] ------------------------------ -new InetSocketTransportAddress(new InetSocketAddress("127.0.0.1", 0)); ------------------------------ - -==== Request Builders refactoring - -An `action` parameter has been added to various request builders: - -* Instead of `new SnapshotsStatusRequestBuilder(elasticSearchClient)` use `new SnapshotsStatusRequestBuilder(elasticSearchClient, SnapshotsStatusAction.INSTANCE)`. - -* Instead of `new CreateSnapshotRequestBuilder(elasticSearchClient)` use `new CreateSnapshotRequestBuilder(elasticSearchClient, CreateSnapshotAction.INSTANCE)`. - -* Instead of `new CreateIndexRequestBuilder(elasticSearchClient, index)` use `new CreateIndexRequestBuilder(elasticSearchClient, CreateIndexAction.INSTANCE, index)`. - -==== Shading and package relocation removed - -Elasticsearch used to shade its dependencies and to relocate packages. We no longer use shading or relocation. -You might need to change your imports to the original package names: - -* `com.google.common` was `org.elasticsearch.common` -* `com.carrotsearch.hppc` was `org.elasticsearch.common.hppc` -* `jsr166e` was `org.elasticsearch.common.util.concurrent.jsr166e` -* `com.fasterxml.jackson` was `org.elasticsearch.common.jackson` -* `org.joda.time` was `org.elasticsearch.common.joda.time` -* `org.joda.convert` was `org.elasticsearch.common.joda.convert` -* `org.jboss.netty` was `org.elasticsearch.common.netty` -* `com.ning.compress` was `org.elasticsearch.common.compress` -* `com.github.mustachejava` was `org.elasticsearch.common.mustache` -* `com.tdunning.math.stats` was `org.elasticsearch.common.stats` -* `org.apache.commons.lang` was `org.elasticsearch.common.lang` -* `org.apache.commons.cli` was `org.elasticsearch.common.cli.commons` diff --git a/docs/reference/migration/migrate_2_0/mapping.asciidoc b/docs/reference/migration/migrate_2_0/mapping.asciidoc deleted file mode 100644 index b4ee0d54412..00000000000 --- a/docs/reference/migration/migrate_2_0/mapping.asciidoc +++ /dev/null @@ -1,439 +0,0 @@ -[[breaking_20_mapping_changes]] -=== Mapping changes - -A number of changes have been made to mappings to remove ambiguity and to -ensure that conflicting mappings cannot be created. - -One major change is that dynamically added fields must have their mapping -confirmed by the master node before indexing continues. This is to avoid a -problem where different shards in the same index dynamically add different -mappings for the same field. These conflicting mappings can silently return -incorrect results and can lead to index corruption. - -This change can make indexing slower when frequently adding many new fields. -We are looking at ways of optimising this process but we chose safety over -performance for this extreme use case. - -==== Conflicting field mappings - -Fields with the same name, in the same index, in different types, must have -the same mapping, with the exception of the <>, <>, -<>, <>, <>, and <> -parameters, which may have different settings per field. - -[source,js] ---------------- -PUT my_index -{ - "mappings": { - "type_one": { - "properties": { - "name": { <1> - "type": "string" - } - } - }, - "type_two": { - "properties": { - "name": { <1> - "type": "string", - "analyzer": "english" - } - } - } - } -} ---------------- -<1> The two `name` fields have conflicting mappings and will prevent Elasticsearch - from starting. - -Elasticsearch will not start in the presence of conflicting field mappings. -These indices must be deleted or reindexed using a new mapping. 
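As an illustrative sketch (not part of the original migration notes), the conflict in the example above could be resolved by giving both `name` fields identical settings, for instance:

[source,js]
---------------
PUT my_index
{
  "mappings": {
    "type_one": {
      "properties": {
        "name": { "type": "string", "analyzer": "english" }
      }
    },
    "type_two": {
      "properties": {
        "name": { "type": "string", "analyzer": "english" }
      }
    }
  }
}
---------------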
-
-The `ignore_conflicts` option of the put mappings API has been removed.
-Conflicts can't be ignored anymore.
-
-==== Fields cannot be referenced by short name
-
-A field can no longer be referenced using its short name. Instead, the full
-path to the field is required. For instance:
-
-[source,js]
---------------
-PUT my_index
-{
-  "mappings": {
-    "my_type": {
-      "properties": {
-        "title":     { "type": "string" }, <1>
-        "name": {
-          "properties": {
-            "title": { "type": "string" }, <2>
-            "first": { "type": "string" },
-            "last":  { "type": "string" }
-          }
-        }
-      }
-    }
-  }
-}
---------------
-<1> This field is referred to as `title`.
-<2> This field is referred to as `name.title`.
-
-Previously, the two `title` fields in the example above could have been
-confused with each other when using the short name `title`.
-
-==== Type name prefix removed
-
-Previously, two fields with the same name in two different types could
-sometimes be disambiguated by prepending the type name. As a side effect, it
-would add a filter on the type name to the relevant query. This feature was
-ambiguous -- a type name could be confused with a field name -- and didn't
-work everywhere, e.g. in aggregations.
-
-Instead, fields should be specified with the full path, but without a type
-name prefix. If you wish to filter by the `_type` field, either specify the
-type in the URL or add an explicit filter.
-
-The following example query in 1.x:
-
-[source,js]
----------------------------
-GET my_index/_search
-{
-  "query": {
-    "match": {
-      "my_type.some_field": "quick brown fox"
-    }
-  }
-}
----------------------------
-
-would be rewritten in 2.0 as:
-
-[source,js]
----------------------------
-GET my_index/my_type/_search <1>
-{
-  "query": {
-    "match": {
-      "some_field": "quick brown fox" <2>
-    }
-  }
-}
----------------------------
-<1> The type name can be specified in the URL to act as a filter.
-<2> The field name should be specified without the type prefix.
-
-==== Field names may not contain dots
-
-In 1.x, it was possible to create fields with dots in their name, for
-instance:
-
-[source,js]
----------------------------
-PUT my_index
-{
-  "mappings": {
-    "my_type": {
-      "properties": {
-        "foo.bar": { <1>
-          "type": "string"
-        },
-        "foo": {
-          "properties": {
-            "bar": { <1>
-              "type": "string"
-            }
-          }
-        }
-      }
-    }
-  }
-}
----------------------------
-<1> These two fields cannot be distinguished as both are referred to as `foo.bar`.
-
-You can no longer create fields with dots in the name.
-
-==== Type names may not start with a dot
-
-In 1.x, Elasticsearch would issue a warning if a type name included a dot,
-e.g. `my.type`. Now that type names are no longer used to distinguish between
-fields in different types, this warning has been relaxed: type names may now
-contain dots, but they may not *begin* with a dot. The only exception to this
-is the special `.percolator` type.
-
-==== Type names may not be longer than 255 characters
-
-Mapping type names may not be longer than 255 characters. Long type names
-will continue to function on indices created before upgrade, but it will not
-be possible to create types with long names in new indices.
-
-==== Types may no longer be deleted
-
-In 1.x it was possible to delete a type mapping, along with all of the
-documents of that type, using the delete mapping API. This is no longer
-supported, because remnants of the fields in the type could remain in the
-index, causing corruption later on.
-
-Instead, if you need to delete a type mapping, you should reindex to a new
-index which does not contain the mapping. If you just need to delete the
-documents that belong to that type, then use the delete-by-query plugin
-instead.
-
-[[migration-meta-fields]]
-==== Type meta-fields
-
-The <> associated with each document type have had configuration options
-removed, to make them more reliable:
-
-* `_id` configuration can no longer be changed. If you need to sort, use the <> field instead.
-* `_type` configuration can no longer be changed.
-* `_index` configuration can no longer be changed.
-* `_routing` configuration is limited to marking routing as required.
-* `_field_names` configuration is limited to disabling the field.
-* `_size` configuration is limited to enabling the field.
-* `_timestamp` configuration is limited to enabling the field and setting the format and default value.
-* `_boost` has been removed.
-* `_analyzer` has been removed.
-
-Importantly, *meta-fields can no longer be specified as part of the document
-body.* Instead, they must be specified in the query string parameters. For
-instance, in 1.x, the `routing` could be specified as follows:
-
-[source,json]
------------------------------
-PUT my_index
-{
-  "mappings": {
-    "my_type": {
-      "_routing": {
-        "path": "group" <1>
-      },
-      "properties": {
-        "group": { <1>
-          "type": "string"
-        }
-      }
-    }
-  }
-}
-
-PUT my_index/my_type/1 <2>
-{
-  "group": "foo"
-}
------------------------------
-<1> This 1.x mapping tells Elasticsearch to extract the `routing` value from the `group` field in the document body.
-<2> This indexing request uses a `routing` value of `foo`.
-
-In 2.0, the routing must be specified explicitly:
-
-[source,json]
------------------------------
-PUT my_index
-{
-  "mappings": {
-    "my_type": {
-      "_routing": {
-        "required": true <1>
-      },
-      "properties": {
-        "group": {
-          "type": "string"
-        }
-      }
-    }
-  }
-}
-
-PUT my_index/my_type/1?routing=bar <2>
-{
-  "group": "foo"
-}
------------------------------
-<1> Routing can be marked as required to ensure it is not forgotten during indexing.
-<2> This indexing request uses a `routing` value of `bar`.
-
-==== `_timestamp` and `_ttl` deprecated
-
-The `_timestamp` and `_ttl` fields are deprecated, but will remain functional
-for the remainder of the 2.x series.
-
-Instead of the `_timestamp` field, use a normal <> field and set
-the value explicitly.
-
-The current `_ttl` functionality will be replaced in a future version with a
-new implementation of TTL, possibly with different semantics, and will not
-depend on the `_timestamp` field.
-
-==== Analyzer mappings
-
-Previously, `index_analyzer` and `search_analyzer` could be set separately,
-while the `analyzer` setting would set both. The `index_analyzer` setting has
-been removed in favour of just using the `analyzer` setting.
-
-If just the `analyzer` is set, it will be used at index time and at search time. To use a different analyzer at search time, specify both the `analyzer` and a `search_analyzer`.
-
-The `index_analyzer`, `search_analyzer`, and `analyzer` type-level settings
-have also been removed, as it is no longer possible to select fields based on
-the type name.
-
-The `_analyzer` meta-field, which allowed setting an analyzer per document, has
-also been removed. It will be ignored on older indices.
-
-==== Date fields and Unix timestamps
-
-Previously, `date` fields would first try to parse values as a Unix timestamp
--- milliseconds-since-the-epoch -- before trying to use their defined date
-`format`.
This meant that formats like `yyyyMMdd` could never work, as values
-would be interpreted as timestamps.
-
-In 2.0, we have added two formats: `epoch_millis` and `epoch_second`. Only
-date fields that use these formats will be able to parse timestamps.
-
-These formats cannot be used in dynamic templates, because they are
-indistinguishable from long values.
-
-==== Default date format
-
-The default date format has changed from `date_optional_time` to
-`strict_date_optional_time`, which expects a 4-digit year and a 2-digit month
-and day (and optionally, a 2-digit hour, minute, and second).
-
-A dynamically added date field, by default, includes the `epoch_millis`
-format to support timestamp parsing. For instance:
-
-[source,js]
-------------------------
-PUT my_index/my_type/1
-{
-  "date_one": "2015-01-01" <1>
-}
-------------------------
-<1> Has `format`: `"strict_date_optional_time||epoch_millis"`.
-
-==== `mapping.date.round_ceil` setting
-
-The `mapping.date.round_ceil` setting for date math parsing has been removed.
-
-[[migration-bool-fields]]
-==== Boolean fields
-
-Boolean fields used to have a string fielddata with `F` meaning `false` and `T`
-meaning `true`. They have been refactored to use numeric fielddata, with `0`
-for `false` and `1` for `true`. As a consequence, the format of the responses of
-the following APIs changed when applied to boolean fields: `0`/`1` is returned
-instead of `F`/`T`:
-
-* <>
-* <>
-* <>
-
-In addition, terms aggregations use a custom formatter for booleans (as for
-dates and IP addresses, which are also backed by numbers) in order to return
-the user-friendly representation of boolean fields: `false`/`true`:
-
-[source,js]
---------------
-"buckets": [
-  {
-    "key": 0,
-    "key_as_string": "false",
-    "doc_count": 42
-  },
-  {
-    "key": 1,
-    "key_as_string": "true",
-    "doc_count": 12
-  }
-]
---------------
-
-==== `index_name` and `path` removed
-
-The `index_name` setting was used to change the name of the Lucene field,
-and the `path` setting was used on `object` fields to determine whether the
-Lucene field should use the full path (including parent object fields), or
-just the final `name`.
-
-These settings have been removed as their purpose is better served with the
-<> parameter.
-
-==== Murmur3 Fields
-
-Fields of type `murmur3` can no longer change the `doc_values` or `index` settings.
-They are always mapped as follows:
-
-[source,js]
---------------------
-{
-  "type": "murmur3",
-  "index": "no",
-  "doc_values": true
-}
---------------------
-
-==== Mappings in config files not supported
-
-The ability to specify mappings in configuration files has been removed. To
-specify default mappings that apply to multiple indexes, use
-<> instead.
-
-Along with this change, the following settings have been removed:
-
-* `index.mapper.default_mapping_location`
-* `index.mapper.default_percolator_mapping_location`
-
-==== Fielddata formats
-
-Now that doc values are the default for fielddata, specialized in-memory
-formats have become an esoteric option. These fielddata formats have been removed:
-
-* `fst` on string fields
-* `compressed` on geo points
-
-The default fielddata format will be used instead.
-
-==== Posting and doc-values codecs
-
-It is no longer possible to specify per-field postings and doc values formats
-in the mappings. This setting will be ignored on indices created before 2.0
-and will cause mapping parsing to fail on indices created on or after 2.0.
For
-old indices, this means that new segments will be written with the default
-postings and doc values formats of the current codec.
-
-It is still possible to change the whole codec by using the `index.codec`
-setting. Note, however, that using a non-default codec is discouraged as
-it could prevent future versions of Elasticsearch from being able to read the
-index.
-
-==== Compress and compress threshold
-
-The `compress` and `compress_threshold` options have been removed from the
-`_source` field and fields of type `binary`. These fields are compressed by
-default. If you would like to increase compression levels, use the new
-<> setting instead.
-
-==== position_offset_gap
-
-The `position_offset_gap` option has been renamed to `position_increment_gap` to
-clear away confusion: Elasticsearch's `position_increment_gap` is now
-mapped directly to Lucene's `position_increment_gap`.
-
-The default `position_increment_gap` is now 100. Indexes created in Elasticsearch
-2.0.0 will default to using 100 and indexes created before that will continue
-to use the old default of 0. This was done to prevent phrase queries from
-matching across different values of the same term unexpectedly. Specifically,
-100 was chosen to cause phrase queries with slops up to 99 to match only within
-a single value of a field.
-
-==== copy_to and multi fields
-
-A <> within a <> is ignored from version 2.0 on. In any version after
-2.1 or 2.0.1, creating a mapping that has a `copy_to` within a multi-field will result
-in an exception.
-
-
diff --git a/docs/reference/migration/migrate_2_0/network.asciidoc b/docs/reference/migration/migrate_2_0/network.asciidoc
deleted file mode 100644
index d493bff5688..00000000000
--- a/docs/reference/migration/migrate_2_0/network.asciidoc
+++ /dev/null
@@ -1,39 +0,0 @@
-[[breaking_20_network_changes]]
-=== Network changes
-
-==== Bind to localhost
-
-Elasticsearch 2.x will only bind to localhost by default. It will try to bind
-to both 127.0.0.1 (IPv4) and [::1] (IPv6), but will work happily in
-environments where only IPv4 or IPv6 is available. This change prevents
-Elasticsearch from trying to connect to other nodes on your network unless you
-specifically tell it to do so. When moving to production you should configure
-the `network.host` parameter, either in the `elasticsearch.yml` config file or
-on the command line:
-
-[source,sh]
--------------------
-bin/elasticsearch --network.host 192.168.1.5
-bin/elasticsearch --network.host _non_loopback_
--------------------
-
-The full list of options that `network.host` accepts can be found in the <>.
-
-==== Unicast discovery
-
-When bound to localhost, Elasticsearch will use unicast to contact
-the first 5 ports in the `transport.tcp.port` range, which defaults to
-`9300-9400`. This preserves the zero-config auto-clustering experience for the developer,
-but it means that you will have to provide a list of <>
-when moving to production, for instance:
-
-[source,yaml]
---------------------
-discovery.zen.ping.unicast.hosts: [ 192.168.1.2, 192.168.1.3 ]
---------------------
-
-You don't need to list all of the nodes in your cluster as unicast hosts, but
-you should specify at least a quorum (majority) of master-eligible nodes. A
-big cluster will typically have three dedicated master nodes, in which case we
-recommend listing all three of them as unicast hosts.
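As a minimal sketch, a cluster with three dedicated master nodes might list all three in `elasticsearch.yml` (the addresses below are placeholders):

[source,yaml]
--------------------
discovery.zen.ping.unicast.hosts: [ "192.168.1.10", "192.168.1.11", "192.168.1.12" ]
--------------------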
-
diff --git a/docs/reference/migration/migrate_2_0/packaging.asciidoc b/docs/reference/migration/migrate_2_0/packaging.asciidoc
deleted file mode 100644
index dae87187ba4..00000000000
--- a/docs/reference/migration/migrate_2_0/packaging.asciidoc
+++ /dev/null
@@ -1,84 +0,0 @@
-[[breaking_20_plugin_and_packaging_changes]]
-=== Plugin and packaging changes
-
-==== Symbolic links and paths
-
-Elasticsearch 2.0 runs with the Java security manager enabled and is much more
-restrictive about which paths it is allowed to access. Various paths can be
-configured, e.g. `path.data`, `path.scripts`, `path.repo`. A configured path
-may itself be a symbolic link, but no symlinks under that path will be
-followed.
-
-==== Running `bin/elasticsearch`
-
-The command line parameter parsing has been rewritten to deal properly with
-spaces in parameters. All config settings can still be specified on the
-command line when starting Elasticsearch, but they must appear after the
-built-in "static parameters", such as `-d` (to daemonize) and `-p` (the PID path).
-
-For instance:
-
-[source,sh]
------------
-bin/elasticsearch -d -p /tmp/foo.pid --http.cors.enabled=true --http.cors.allow-origin='*'
------------
-
-For a list of static parameters, run `bin/elasticsearch -h`.
-
-==== `-f` removed
-
-The `-f` parameter, which used to indicate that Elasticsearch should be run in
-the foreground, was deprecated in 1.0 and removed in 2.0.
-
-==== `-V` for version
-
-The `-v` parameter now means `--verbose` for both `bin/elasticsearch-plugin` and
-`bin/elasticsearch` (although it has no effect on the latter). To output the
-version, use `-V` or `--version` instead.
-
-==== Plugin manager should run as root
-
-The permissions of the `config`, `bin`, and `plugins` directories in the RPM
-and deb packages have been made more restrictive. The plugin manager should
-be run as root, otherwise it will not be able to install plugins.
-
-==== Support for official plugins
-
-Almost all of the official Elasticsearch plugins have been moved to the main
-`elasticsearch` repository. They will be released at the same time as
-Elasticsearch and have the same version number as Elasticsearch.
-
-Official plugins can be installed as follows:
-
-[source,sh]
---------------
-sudo bin/elasticsearch-plugin install analysis-icu
---------------
-
-Community-provided plugins can be installed as before.
-
-==== Plugins require descriptor file
-
-All plugins are now required to have a https://github.com/elastic/elasticsearch/blob/2.0/dev-tools/src/main/resources/plugin-metadata/plugin-descriptor.properties[plugin-descriptor.properties] file. If a node has a plugin installed which lacks this file, it will be unable to start.
-
-==== Repository naming structure changes
-
-Elasticsearch 2.0 changes the way the repository URLs are referenced. Instead
-of specific repositories for both major and minor versions, the repositories will
-use a major version reference only.
-
-The URL for apt packages now uses the following structure:
-
-[source,sh]
---------------
-deb http://packages.elastic.co/elasticsearch/2.x/debian stable main
---------------
-
-And for yum packages it is:
-
-[source,sh]
---------------
-baseurl=http://packages.elastic.co/elasticsearch/2.x/centos
---------------
-
-The <> page details this change.
diff --git a/docs/reference/migration/migrate_2_0/parent_child.asciidoc b/docs/reference/migration/migrate_2_0/parent_child.asciidoc
deleted file mode 100644
index 1addf883973..00000000000
--- a/docs/reference/migration/migrate_2_0/parent_child.asciidoc
+++ /dev/null
@@ -1,43 +0,0 @@
-[[breaking_20_parent_child_changes]]
-=== Parent/Child changes
-
-Parent/child has been rewritten completely to reduce memory usage and to
-execute `has_child` and `has_parent` queries faster and more efficiently. The
-`_parent` field uses doc values by default. The refactored and improved
-implementation is only active for indices created on or after version 2.0.
-
-In order to benefit from all the performance and memory improvements, we
-recommend reindexing all existing indices that use the `_parent` field.
-
-==== Parent type cannot pre-exist
-
-A mapping type is declared as a child of another mapping type by specifying
-the `_parent` meta field:
-
-[source,js]
--------------------------
-DELETE *
-
-PUT my_index
-{
-  "mappings": {
-    "my_parent": {},
-    "my_child": {
-      "_parent": {
-        "type": "my_parent" <1>
-      }
-    }
-  }
-}
--------------------------
-<1> The `my_parent` type is the parent of the `my_child` type.
-
-The mapping for the parent type can be added at the same time as the mapping
-for the child type, but cannot be added before the child type.
-
-==== `top_children` query removed
-
-The `top_children` query has been removed in favour of the `has_child` query.
-It wasn't always faster than the `has_child` query and the results were usually
-inaccurate. The total hits and any aggregations in the same search request
-would be incorrect if `top_children` was used.
diff --git a/docs/reference/migration/migrate_2_0/query_dsl.asciidoc b/docs/reference/migration/migrate_2_0/query_dsl.asciidoc
deleted file mode 100644
index a85ade8690b..00000000000
--- a/docs/reference/migration/migrate_2_0/query_dsl.asciidoc
+++ /dev/null
@@ -1,189 +0,0 @@
-[[breaking_20_query_dsl_changes]]
-=== Query DSL changes
-
-==== Queries and filters merged
-
-Queries and filters have been merged -- all filter clauses are now query
-clauses. Instead, query clauses can now be used in _query context_ or in
-_filter context_:
-
-Query context::
-
-A query used in query context will calculate relevance scores and will not be
-cacheable. Query context is used whenever filter context does not apply.
-
-Filter context::
-+
---
-
-A query used in filter context will not calculate relevance scores, and will
-be cacheable. Filter context is introduced by:
-
-* the `constant_score` query
-* the `must_not` and (newly added) `filter` parameter in the `bool` query
-* the `filter` and `filters` parameters in the `function_score` query
-* any API called `filter`, such as the `post_filter` search parameter, or in
-  aggregations or index aliases
---
-
-==== `terms` query and filter
-
-The `execution` option of the `terms` filter is now deprecated and is ignored
-if provided. Similarly, the `terms` query no longer supports the
-`minimum_should_match` parameter.
-
-==== `or` and `and` now implemented via `bool`
-
-The `or` and `and` filters previously had a different execution pattern to the
-`bool` filter. It used to be important to use `and`/`or` with certain filter
-clauses, and `bool` with others.
-
-This distinction has been removed: the `bool` query is now smart enough to
-handle both cases optimally. As a result of this change, the `or` and `and`
-filters are now sugar syntax which is executed internally as a `bool` query.
-These filters may be removed in the future.
-
-==== `filtered` query and `query` filter deprecated
-
-The `query` filter is deprecated as it is no longer needed -- all queries can
-be used in query or filter context.
-
-The `filtered` query is deprecated in favour of the `bool` query. Instead of
-the following:
-
-[source,js]
-------------------------
-GET _search
-{
-  "query": {
-    "filtered": {
-      "query": {
-        "match": {
-          "text": "quick brown fox"
-        }
-      },
-      "filter": {
-        "term": {
-          "status": "published"
-        }
-      }
-    }
-  }
-}
-------------------------
-
-move the query and filter to the `must` and `filter` parameters in the `bool`
-query:
-
-[source,js]
-------------------------
-GET _search
-{
-  "query": {
-    "bool": {
-      "must": {
-        "match": {
-          "text": "quick brown fox"
-        }
-      },
-      "filter": {
-        "term": {
-          "status": "published"
        }
-      }
-    }
-  }
-}
-------------------------
-
-==== Filter auto-caching
-
-It used to be possible to control which filters were cached with the `_cache`
-option and to provide a custom `_cache_key`. These options are deprecated
-and, if present, will be ignored.
-
-Query clauses used in filter context are now auto-cached when it makes sense
-to do so. The algorithm takes into account the frequency of use, the cost of
-query execution, and the cost of building the filter.
-
-The `terms` filter lookup mechanism no longer caches the values of the
-document containing the terms. It relies on the filesystem cache instead. If
-the lookup index is not too large, it is recommended to replicate it to all
-nodes by setting `index.auto_expand_replicas: 0-all` in order to remove the
-network overhead as well.
-
-==== Numeric queries use IDF for scoring
-
-Previously, term queries on numeric fields were deliberately prevented from
-using the usual Lucene scoring logic, and this behaviour was undocumented and,
-to some, unexpected.
-
-Single `term` queries on numeric fields now score in the same way as string
-fields, using IDF and norms (if enabled).
-
-To query numeric fields without scoring, the query clause should be used in
-filter context, e.g. in the `filter` parameter of the `bool` query, or wrapped
-in a `constant_score` query:
-
-[source,js]
----------------------------
-GET _search
-{
-  "query": {
-    "bool": {
-      "must": [
-        {
-          "match": { <1>
-            "numeric_tag": 5
-          }
-        }
-      ],
-      "filter": [
-        {
-          "match": { <2>
-            "count": 5
-          }
-        }
-      ]
-    }
-  }
-}
----------------------------
-<1> This clause would include IDF in the relevance score calculation.
-<2> This clause would have no effect on the relevance score.
-
-==== Fuzziness and fuzzy-like-this
-
-Fuzzy matching used to calculate the score for each fuzzy alternative, meaning
-that rare misspellings would have a higher score than the more common correct
-spellings. Now, fuzzy matching blends the scores of all the fuzzy alternatives
-to use the IDF of the most frequently occurring alternative.
-
-Fuzziness can no longer be specified using a percentage, but should instead
-use the number of allowed edits:
-
-* `0`, `1`, `2`, or
-* `AUTO` (which chooses `0`, `1`, or `2` based on the length of the term)
-
-The `fuzzy_like_this` and `fuzzy_like_this_field` queries used a very
-expensive approach to fuzzy matching and have been removed.
-
-==== More Like This
-
-The More Like This (`mlt`) API and the `more_like_this_field` (`mlt_field`)
-query have been removed in favor of the
-<> query.
-
-The parameter `percent_terms_to_match` has been removed in favor of
-`minimum_should_match`.
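As an illustrative example (the field name and text are made up), a `more_like_this` query would specify `minimum_should_match` directly:

[source,js]
----------------------------
GET _search
{
  "query": {
    "more_like_this": {
      "fields": [ "title" ],
      "like": "quick brown fox",
      "min_term_freq": 1,
      "minimum_should_match": "30%"
    }
  }
}
----------------------------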
-
-==== `limit` filter deprecated
-
-The `limit` filter is deprecated and becomes a no-op. You can achieve similar
-behaviour using the <> parameter.
-
-==== Java plugins registering custom queries
-
-Java plugins that register custom queries can do so by using the
-`IndicesQueriesModule#addQuery(Class)` method. Other
-ways to register custom queries are not supported anymore.
diff --git a/docs/reference/migration/migrate_2_0/removals.asciidoc b/docs/reference/migration/migrate_2_0/removals.asciidoc
deleted file mode 100644
index 31693c3d3ac..00000000000
--- a/docs/reference/migration/migrate_2_0/removals.asciidoc
+++ /dev/null
@@ -1,100 +0,0 @@
-[[breaking_20_removed_features]]
-=== Removed features
-
-==== Rivers have been removed
-
-Elasticsearch does not support rivers anymore. While we had first planned to
-keep them around to ease migration, keeping support for rivers proved to be
-challenging as it conflicted with other important changes that we wanted to
-bring to 2.0, like synchronous dynamic mappings updates, so we eventually
-decided to remove them entirely. See
-link:/blog/deprecating_rivers[Deprecating Rivers] for more background about
-why we took this decision.
-
-==== Facets have been removed
-
-Facets, deprecated since 1.0, have now been removed. Instead, use the much
-more powerful and flexible <> framework.
-This also means that Kibana 3 will not work with Elasticsearch 2.0.
-
-==== MVEL has been removed
-
-The MVEL scripting language has been removed. The default scripting language
-is now Groovy.
-
-==== Delete-by-query is now a plugin
-
-The old delete-by-query functionality was fast but unsafe. It could lead to
-document differences between the primary and replica shards, and could even
-produce out of memory exceptions and cause the cluster to crash.
-
-This feature has been reimplemented using the <> and
-<> APIs. It may be slower for queries which match
-large numbers of documents, but it is safe.
-
-Currently, a long running delete-by-query job cannot be cancelled, which is
-one of the reasons that this functionality is only available as a plugin. You
-can install the plugin with:
-
-[source,sh]
------------------
-./bin/elasticsearch-plugin install delete-by-query
------------------
-
-See {plugins}/plugins-delete-by-query.html for more information.
-
-==== Multicast Discovery is now a plugin
-
-Support for multicast is very patchy. Linux doesn't allow multicast listening on localhost,
-while OS/X sends multicast broadcasts across all interfaces regardless of the configured
-bind address. On top of that, some networks have multicast disabled by default.
-
-This feature has been moved to a plugin. The default discovery mechanism now uses
-unicast, with a default setup which looks for the first 5 ports on localhost. If you
-still need to use multicast discovery, you can install the plugin with:
-
-[source,sh]
------------------
-./bin/elasticsearch-plugin install discovery-multicast
------------------
-
-==== `_shutdown` API
-
-The `_shutdown` API has been removed without a replacement. Nodes should be
-managed via the operating system and the provided start/stop scripts.
-
-==== `murmur3` is now a plugin
-
-The `murmur3` field, which indexes hashes of the field values, has been moved
-out of core and is available as a plugin.
It can be installed as: - -[source,sh] ------------------- -./bin/elasticsearch-plugin install mapper-murmur3 ------------------- - -==== `_size` is now a plugin - -The `_size` meta-data field, which indexes the size in bytes of the original -JSON document, has been moved out of core and is available as a plugin. It -can be installed as: - -[source,sh] ------------------- -./bin/elasticsearch-plugin install mapper-size ------------------- - -==== Thrift and memcached transport - -The thrift and memcached transport plugins are no longer supported. Instead, use -either the HTTP transport (enabled by default) or the node or transport Java client. - -==== Bulk UDP - -The bulk UDP API has been removed. Instead, use the standard -<> API, or use UDP to send documents to Logstash first. - -==== MergeScheduler pluggability - -The merge scheduler is no longer pluggable. - diff --git a/docs/reference/migration/migrate_2_0/scripting.asciidoc b/docs/reference/migration/migrate_2_0/scripting.asciidoc deleted file mode 100644 index 495d2daa2c5..00000000000 --- a/docs/reference/migration/migrate_2_0/scripting.asciidoc +++ /dev/null @@ -1,103 +0,0 @@ -[[breaking_20_scripting_changes]] -=== Scripting changes - -==== Scripting syntax - -The syntax for scripts has been made consistent across all APIs. The accepted -format is as follows: - -Inline/Dynamic scripts:: -+ --- - -[source,js] ---------------- -"script": { - "inline": "doc['foo'].value + val", <1> - "lang": "groovy", <2> - "params": { "val": 3 } <3> -} ---------------- -<1> The inline script to execute. -<2> The optional language of the script. -<3> Any named parameters. --- - -Indexed scripts:: -+ --- -[source,js] ---------------- -"script": { - "id": "my_script_id", <1> - "lang": "groovy", <2> - "params": { "val": 3 } <3> -} ---------------- -<1> The ID of the indexed script. -<2> The optional language of the script. -<3> Any named parameters. --- - -File scripts:: -+ --- -[source,js] ---------------- -"script": { - "file": "my_file", <1> - "lang": "groovy", <2> - "params": { "val": 3 } <3> -} ---------------- -<1> The filename of the script, without the `.lang` suffix. -<2> The optional language of the script. -<3> Any named parameters. --- - -For example, an update request might look like this: - -[source,js] ---------------- -POST my_index/my_type/1/_update -{ - "script": { - "inline": "ctx._source.count += val", - "params": { "val": 3 } - }, - "upsert": { - "count": 0 - } -} ---------------- - -A short syntax exists for running inline scripts in the default scripting -language without any parameters: - -[source,js] ----------------- -GET _search -{ - "script_fields": { - "concat_fields": { - "script": "doc['one'].value + ' ' + doc['two'].value" - } - } -} ----------------- - -==== Scripting settings - -The `script.disable_dynamic` node setting has been replaced by fine-grained -script settings described in <>. - -==== Groovy scripts sandbox - -The Groovy sandbox and related settings have been removed. Groovy is now a -non-sandboxed scripting language, without any option to turn the sandbox on. - -==== Plugins making use of scripts - -Plugins that make use of scripts must register their own script context -through `ScriptModule`. Script contexts can be used as part of fine-grained -settings to enable/disable scripts selectively. 
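As a minimal sketch, the fine-grained settings allow combinations such as the following in `elasticsearch.yml` (which keys are available depends on the script contexts registered in your installation):

[source,yaml]
---------------
script.inline: false
script.indexed: true
script.file: true
---------------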
diff --git a/docs/reference/migration/migrate_2_0/search.asciidoc b/docs/reference/migration/migrate_2_0/search.asciidoc
deleted file mode 100644
index 036313077ff..00000000000
--- a/docs/reference/migration/migrate_2_0/search.asciidoc
+++ /dev/null
@@ -1,122 +0,0 @@
-[[breaking_20_search_changes]]
-=== Search changes
-
-==== Partial fields
-
-Partial fields have been removed in favor of <>.
-
-==== `search_type=count` deprecated
-
-The `count` search type has been deprecated. All benefits from this search
-type can now be achieved by using the (default) `query_then_fetch` search type
-and setting `size` to `0`.
-
-==== The count api internally uses the search api
-
-The count api is now a shortcut to the search api with `size` set to 0. As a
-result, a total failure will result in an exception being returned rather
-than a normal response with `count` set to `0` and shard failures.
-
-==== All stored meta-fields returned by default
-
-Previously, meta-fields like `_routing`, `_timestamp`, etc. would only be
-included in the search results if specifically requested with the `fields`
-parameter. Now, all meta-fields which have stored values will be returned by
-default. Additionally, they are now returned at the top level (along with
-`_index`, `_type`, and `_id`) instead of in the `fields` element.
-
-For instance, the following request:
-
-[source,sh]
---------------
-GET /my_index/_search?fields=foo
---------------
-
-might return:
-
-[source,js]
---------------
-{
-  [...]
-  "hits": {
-    "total": 1,
-    "max_score": 1,
-    "hits": [
-      {
-        "_index": "my_index",
-        "_type": "my_type",
-        "_id": "1",
-        "_score": 1,
-        "_timestamp": 10000000, <1>
-        "fields": {
-          "foo" : [ "bar" ]
-        }
-      }
-    ]
-  }
-}
---------------
-<1> The `_timestamp` is returned by default, and at the top level.
-
-
-==== Script fields
-
-Script fields in 1.x were only returned as a single value. Even if the return
-value of a script was a list, it would be returned as an array containing an
-array:
-
-[source,js]
---------------
-"fields": {
-  "my_field": [
-    [
-      "v1",
-      "v2"
-    ]
-  ]
-}
---------------
-
-In Elasticsearch 2.0, scripts that return a list of values are treated as
-multivalued fields. The same example would return the following response, with
-values in a single array.
-
-[source,js]
---------------
-"fields": {
-  "my_field": [
-    "v1",
-    "v2"
-  ]
-}
---------------
-
-==== Timezone for date field
-
-Specifying the `time_zone` parameter in queries or aggregations on fields of
-type `date` must now be either an ISO 8601 UTC offset or a timezone id. For
-example, the value `+1:00` must now be written as `+01:00`.
-
-==== Only highlight queried fields
-
-The default value for the `require_field_match` option has changed from
-`false` to `true`, meaning that the highlighters will, by default, only take
-the fields that were queried into account.
-
-This means that, when querying the `_all` field, trying to highlight on any
-field other than `_all` will produce no highlighted snippets. Querying the
-same fields that need to be highlighted is the cleaner solution to get
-highlighted snippets back. Otherwise, the `require_field_match` option can be set
-to `false` to ignore field names completely when highlighting.
-
-The postings highlighter doesn't support the `require_field_match` option
-anymore; it will only highlight fields that were queried.
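For the plain and fvh highlighters, an illustrative request disabling `require_field_match` might look like the following sketch (field names are made up):

[source,js]
---------------
GET _search
{
  "query": {
    "match": { "_all": "quick brown fox" }
  },
  "highlight": {
    "require_field_match": false,
    "fields": {
      "content": {}
    }
  }
}
---------------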
-
-==== Postings highlighter doesn't support `match_phrase_prefix`
-
-The `match` query with type set to `phrase_prefix` (or the
-`match_phrase_prefix` query) is not supported by the postings highlighter. No
-highlighted snippets will be returned.
-
-
-
diff --git a/docs/reference/migration/migrate_2_0/settings.asciidoc b/docs/reference/migration/migrate_2_0/settings.asciidoc
deleted file mode 100644
index 06aa743a5d8..00000000000
--- a/docs/reference/migration/migrate_2_0/settings.asciidoc
+++ /dev/null
@@ -1,204 +0,0 @@
-[[breaking_20_setting_changes]]
-=== Setting changes
-
-==== Command line flags
-
-Command line flags using single-dash notation must now be specified as the first arguments.
-For example, if you previously used:
-
-[source,sh]
---------------
-./elasticsearch --node.name=test_node -Des.path.conf=/opt/elasticsearch/conf/test_node
---------------
-
-This will now need to be changed to:
-
-[source,sh]
---------------
-./elasticsearch -Des.path.conf=/opt/elasticsearch/conf/test_node --node.name=test_node
---------------
-
-for the flag to take effect.
-
-[[migration-script-settings]]
-==== Scripting settings
-
-The `script.disable_dynamic` node setting has been replaced by fine-grained
-script settings described in the <>.
-The following setting was previously used to enable dynamic or inline scripts:
-
-[source,yaml]
---------------
-script.disable_dynamic: false
---------------
-
-It should be replaced with the following two settings in `elasticsearch.yml` that
-achieve the same result:
-
-[source,yaml]
---------------
-script.inline: true
-script.indexed: true
---------------
-
-==== Units required for time and byte-sized settings
-
-Any settings which accept time or byte values must now be specified with
-units. For instance, it is too easy to set the `refresh_interval` to 1
-*millisecond* instead of 1 second:
-
-[source,js]
---------------
-PUT _settings
-{
-  "index.refresh_interval": 1
-}
---------------
-
-In 2.0, the above request will throw an exception. Instead, the refresh
-interval should be set to `"1s"` for one second.
-
-==== Merge and merge throttling settings
-
-The tiered merge policy is now the only supported merge policy. These settings
-have been removed:
-
-* `index.merge.policy.type`
-* `index.merge.policy.min_merge_size`
-* `index.merge.policy.max_merge_size`
-* `index.merge.policy.merge_factor`
-* `index.merge.policy.calibrate_size_by_deletes`
-* `index.merge.policy.min_merge_docs`
-* `index.merge.policy.max_merge_docs`
-
-Merge throttling now uses a feedback loop to auto-throttle. These settings
-have been removed:
-
-* `indices.store.throttle.type`
-* `indices.store.throttle.max_bytes_per_sec`
-* `index.store.throttle.type`
-* `index.store.throttle.max_bytes_per_sec`
-
-==== Shadow replica settings
-
-The `node.enable_custom_paths` setting has been removed and replaced by the
-`path.shared_data` setting to allow shadow replicas with custom paths to work
-with the security manager.
For example, if your previous configuration had:
-
-[source,yaml]
------
-node.enable_custom_paths: true
------
-
-and you created an index using shadow replicas with `index.data_path` set to
-`/opt/data/my_index` with the following:
-
-[source,js]
--------------------------------------------------
-PUT /my_index
-{
-  "index": {
-    "number_of_shards": 1,
-    "number_of_replicas": 4,
-    "data_path": "/opt/data/my_index",
-    "shadow_replicas": true
-  }
-}
--------------------------------------------------
-
-then for 2.0, you will need to set `path.shared_data` to a parent directory of the
-index's `data_path`, so:
-
-[source,yaml]
-----------
-path.shared_data: /opt/data
-----------
-
-==== Resource watcher settings renamed
-
-The setting names for configuring the resource watcher have been renamed
-to prevent clashes with the watcher plugin:
-
-* `watcher.enabled` is now `resource.reload.enabled`
-* `watcher.interval` is now `resource.reload.interval`
-* `watcher.interval.low` is now `resource.reload.interval.low`
-* `watcher.interval.medium` is now `resource.reload.interval.medium`
-* `watcher.interval.high` is now `resource.reload.interval.high`
-
-==== `index.gateway` setting renamed
-
-* `index.gateway.local.sync` is now `index.translog.sync_interval`
-
-==== Hunspell dictionary configuration
-
-The parameter `indices.analysis.hunspell.dictionary.location` has been
-removed, and `/hunspell` is always used.
-
-==== CORS allowed origins
-
-The CORS allowed origins setting, `http.cors.allow-origin`, no longer has a default value. Previously, the default value
-was `*`, which would allow CORS requests from any origin and is considered insecure. The `http.cors.allow-origin` setting
-should be specified with only the origins that should be allowed, like so:
-
-[source,yaml]
---------------
-http.cors.allow-origin: /https?:\/\/localhost(:[0-9]+)?/
---------------
-
-==== JSONP support
-
-JSONP callback support has now been removed. CORS should be used to access Elasticsearch
-over AJAX instead:
-
-[source,yaml]
---------------
-http.cors.enabled: true
-http.cors.allow-origin: /https?:\/\/localhost(:[0-9]+)?/
---------------
-
-==== In memory indices
-
-The `memory` / `ram` store (`index.store.type`) option was removed in
-Elasticsearch. In-memory indices are no longer supported.
-
-==== Log messages truncated
-
-Log messages are now truncated at 10,000 characters. This can be changed in
-the `logging.yml` configuration file with the `file.layout.conversionPattern`
-setting.
-
-==== Custom config file
-
-It is no longer possible to specify a custom config file with the `CONF_FILE`
-environment variable, or the `-Des.config`, `-Des.default.config`, or
-`-Delasticsearch.config` parameters.
-
-Instead, the config file must be named `elasticsearch.yml` and must be located
-in the default `config/` directory, unless a custom config directory is specified.
-
-The location of a custom config directory may be specified as follows:
-
-[source,sh]
--------------
-./bin/elasticsearch --path.conf=/path/to/conf/dir
-./bin/elasticsearch-plugin -Des.path.conf=/path/to/conf/dir install analysis-icu
--------------
-
-When using the RPM or debian packages, the plugin script and the
-init/service scripts will consult the `CONF_DIR` environment variable
-to check for a custom config location. The value of the `CONF_DIR`
-variable can be set in the environment config file which is located either in
-`/etc/default/elasticsearch` or `/etc/sysconfig/elasticsearch`.
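For instance, a sketch of such an entry (the path is illustrative):

[source,sh]
--------------
# in /etc/default/elasticsearch or /etc/sysconfig/elasticsearch
CONF_DIR=/etc/elasticsearch/custom
--------------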
-
-==== Custom analysis file paths
-
-It is no longer possible to set a custom file path outside `CONF_DIR` for `*_path` settings
-in <> or <> filters.
-You must specify either a path relative to `CONF_DIR` or an absolute path inside `CONF_DIR`.
-
-==== `ES_CLASSPATH` removed
-
-The `ES_CLASSPATH` environment variable is no longer used to set the class
-path. External libraries should preferably be loaded using the plugin
-mechanism or, if you really must, be copied to the `lib/` directory.
diff --git a/docs/reference/migration/migrate_2_0/snapshot_restore.asciidoc b/docs/reference/migration/migrate_2_0/snapshot_restore.asciidoc
deleted file mode 100644
index c9b222abdc8..00000000000
--- a/docs/reference/migration/migrate_2_0/snapshot_restore.asciidoc
+++ /dev/null
@@ -1,38 +0,0 @@
-[[breaking_20_snapshot_and_restore_changes]]
-=== Snapshot and Restore changes
-
-==== File-system repositories must be whitelisted
-
-Locations of the shared file system repositories and the URL repositories with
-`file:` URLs now have to be registered before starting Elasticsearch using the
-`path.repo` setting. The `path.repo` setting can contain one or more
-repository locations:
-
-[source,yaml]
---------------
-path.repo: ["/mnt/daily", "/mnt/weekly"]
---------------
-
-If the repository location is specified as an absolute path it has to start
-with one of the locations specified in `path.repo`. If the location is
-specified as a relative path, it will be resolved against the first location
-specified in the `path.repo` setting.
-
-==== URL repositories must be whitelisted
-
-URL repositories with `http:`, `https:`, and `ftp:` URLs have to be
-whitelisted before starting Elasticsearch with the
-`repositories.url.allowed_urls` setting. This setting supports wildcards in
-the place of host, path, query, and fragment. For example:
-
-[source,yaml]
-----------------------------------
-repositories.url.allowed_urls: ["http://www.example.org/root/*", "https://*.mydomain.com/*?*#*"]
-----------------------------------
-
-==== Wildcard expansion
-
-The obsolete parameters `expand_wildcards_open` and `expand_wildcards_close`
-are no longer supported by the snapshot and restore operations. These
-parameters have been replaced by a single `expand_wildcards` parameter. See
-<> for more.
diff --git a/docs/reference/migration/migrate_2_0/stats.asciidoc b/docs/reference/migration/migrate_2_0/stats.asciidoc
deleted file mode 100644
index dc80ecd83ec..00000000000
--- a/docs/reference/migration/migrate_2_0/stats.asciidoc
+++ /dev/null
@@ -1,52 +0,0 @@
-[[breaking_20_stats_info_and_literal_cat_literal_changes]]
-=== Stats, info, and `cat` changes
-
-==== Sigar removed
-
-We no longer ship the Sigar library for operating system dependent statistics,
-as it no longer seems to be maintained. Instead, we rely on the statistics
-provided by the JVM. This has resulted in a number of changes to the node
-info and node stats responses:
-
-* `network.*` has been removed from nodes info and nodes stats.
-* `fs.*.dev` and `fs.*.disk*` have been removed from nodes stats.
-* `os.*` has been removed from nodes stats, except for `os.timestamp`,
-  `os.load_average`, `os.mem.*`, and `os.swap.*`.
-* `os.mem.total` and `os.swap.total` have been removed from nodes info.
-* `process.mem.resident` and `process.mem.share` have been removed from node stats.
-
-==== Removed `id_cache` from stats apis
-
-Removed `id_cache` metric from nodes stats, indices stats and cluster stats
-apis.
This metric has also been removed from the shards cat, indices cat and
-nodes cat apis. Parent/child memory is now reported under fielddata, because
-it has internally been using fielddata for a while now.
-
-To see just how much parent/child related fielddata is taking, the
-`fielddata_fields` option can be used on the stats apis. Indices stats
-example:
-
-[source,js]
--------------------------------------------------
-GET /_stats/fielddata?fielddata_fields=_parent
--------------------------------------------------
-
-==== Percolator stats
-
-The total time spent running percolator queries is now called `percolate.time`
-instead of `percolate.get_time`.
-
-==== Cluster state REST API
-
-The cluster state API doesn't return the `routing_nodes` section anymore when
-`routing_table` is requested. The newly introduced `routing_nodes` flag can be
-used separately to control whether `routing_nodes` should be returned.
-
-==== Index status API
-
-The deprecated index status API has been removed.
-
-==== Nodes Stats API
-
-Queue lengths are now reported as basic numeric values so they can easily be processed
-by code. Previously we used a human-readable format. For example, a queue with 1,000
-items is now reported as `1000` instead of `1k`.
diff --git a/docs/reference/migration/migrate_2_0/striping.asciidoc b/docs/reference/migration/migrate_2_0/striping.asciidoc
deleted file mode 100644
index 2e80f29c774..00000000000
--- a/docs/reference/migration/migrate_2_0/striping.asciidoc
+++ /dev/null
@@ -1,21 +0,0 @@
-[[breaking_20_multiple_literal_data_path_literal_striping]]
-=== Multiple `path.data` striping
-
-Previously, if the `path.data` setting listed multiple data paths, then a
-shard would be ``striped'' across all paths by writing a whole file to each
-path in turn (in accordance with the `index.store.distributor` setting). The
-result was that files from a single segment in a shard could be spread across
-multiple disks, and the failure of any one disk could corrupt multiple shards.
-
-This striping is no longer supported. Instead, different shards may be
-allocated to different paths, but all of the files in a single shard will be
-written to the same path.
-
-If striping is detected while starting Elasticsearch 2.0.0 or later, *all of
-the files belonging to the same shard will be migrated to the same path*. If
-there is not enough disk space to complete this migration, the upgrade will be
-cancelled and can only be resumed once enough disk space is made available.
-
-The `index.store.distributor` setting has also been removed.
-
-
diff --git a/docs/reference/migration/migrate_2_1.asciidoc b/docs/reference/migration/migrate_2_1.asciidoc
deleted file mode 100644
index 454a57f96bc..00000000000
--- a/docs/reference/migration/migrate_2_1.asciidoc
+++ /dev/null
@@ -1,87 +0,0 @@
-[[breaking-changes-2.1]]
-== Breaking changes in 2.1
-
-This section discusses the changes that you need to be aware of when migrating
-your application to Elasticsearch 2.1.
-
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-
-[[breaking_21_search_changes]]
-=== Search changes
-
-==== `search_type=scan` deprecated
-
-The `scan` search type has been deprecated.
All benefits from this search
-type can now be achieved by doing a scroll request that sorts documents in
-`_doc` order, for instance:
-
-[source,sh]
---------------
-GET /my_index/_search?scroll=2m
-{
-  "sort": [
-    "_doc"
-  ]
-}
---------------
-
-Scroll requests sorted by `_doc` have been optimized to more efficiently resume
-from where the previous request stopped, so this will have the same performance
-characteristics as the former `scan` search type.
-
-==== from + size limits
-
-Elasticsearch will now return an error message if a query's `from` + `size` is
-more than the `index.max_result_window` parameter. This parameter defaults to
-10,000, which is safe for almost all clusters. Values higher than that can consume
-significant chunks of heap memory per search and per shard executing the
-search. It's safest to leave this value as it is and use the scroll api for any
-deep scrolling, but this setting is dynamic so it can be raised or lowered as
-needed.
-
-[[breaking_21_update_changes]]
-=== Update changes
-
-==== Updates now `detect_noop` by default
-
-We've switched the default value of the `detect_noop` option from `false` to
-`true`. This means that Elasticsearch will ignore updates that don't change the
-source unless you explicitly set `"detect_noop": false`. `detect_noop` was
-always computationally cheap compared to the expense of the update, which can be
-thought of as a delete operation followed by an index operation.
-
-[[breaking_21_removed_features]]
-=== Removed features
-
-==== `indices.fielddata.cache.expire`
-
-The experimental feature `indices.fielddata.cache.expire` has been removed.
-For indices that have this setting configured, this config will be ignored.
-
-[[breaking_21_more_like_this]]
-=== More Like This
-
-The `MoreLikeThisQueryBuilder#ignoreLike` methods have been deprecated in favor
-of using the unlike methods.
-
-`MoreLikeThisBuilder#addItem` has been deprecated in favor of using
-`MoreLikeThisBuilder#addLikeItem`.
-
-[[breaking_21_nested_sorting]]
-=== Nested sorting
-
-If sorting on a field inside a nested object, the `nested_path` should be specified.
-Previously there was an attempt to resolve the nested path automatically, but that was sometimes incorrect.
-To avoid confusion, the `nested_path` should always be specified.
-
-[[breaking_21_index_apis]]
-=== Index APIs
-
-==== Optimize API
-
-The Optimize API has been deprecated; all new optimize actions should use the new Force Merge API.
diff --git a/docs/reference/migration/migrate_2_2.asciidoc b/docs/reference/migration/migrate_2_2.asciidoc
deleted file mode 100644
index d6035c83b8a..00000000000
--- a/docs/reference/migration/migrate_2_2.asciidoc
+++ /dev/null
@@ -1,80 +0,0 @@
-[[breaking-changes-2.2]]
-== Breaking changes in 2.2
-
-This section discusses the changes that you need to be aware of when migrating
-your application to Elasticsearch 2.2.
-
-[float]
-=== Mapping APIs
-
-==== Geo Point Type
-
-The `geo_point` format has been changed to reduce index size and the time required to both index and query
-geo point data. To make these performance improvements possible, both `doc_values` and `coerce` are required
-and therefore cannot be changed. For this reason the `doc_values` and `coerce` parameters have been removed
-from the <> field mapping.
-
-[float]
-=== Scripting and security
-
-The Java Security Manager is being used to lock down the privileges available
-to the scripting languages and to restrict the classes they are allowed to
-load to a predefined whitelist.
These changes may cause scripts which worked
-in earlier versions to fail. See <> for more
-details.
-
-[float]
-=== Field stats API
-
-The field stats response format has been changed for number-based and date
-fields. The `min_value` and `max_value` elements now return values as numbers,
-and the new `min_value_as_string` and `max_value_as_string` return the values
-as strings.
-
-[float]
-=== Default logging using systemd
-
-In previous versions of Elasticsearch using systemd, the default logging
-configuration routed standard output to `/dev/null` and standard error to
-the journal. However, there are often critical error messages at
-startup that are logged to standard output rather than standard error
-and these error messages would be lost to the ether. The default has
-changed to now route standard output to the journal and standard error
-to inherit this setting (these are the defaults for systemd). These
-settings can be modified by editing the `elasticsearch.service` file.
-
-[float]
-=== Java Client
-
-Previously it was possible to iterate over `ClusterHealthResponse` to get information about `ClusterIndexHealth`.
-While this is still possible, it now requires iterating over the values returned from `getIndices()`:
-
-[source,java]
---------------
-ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().get();
-for (Map.Entry<String, ClusterIndexHealth> index : clusterHealthResponse.getIndices().entrySet()) {
-    String indexName = index.getKey();
-    ClusterIndexHealth health = index.getValue();
-}
---------------
-
-[float]
-=== Cloud AWS Plugin
-
-Proxy settings have been deprecated and renamed:
-
-* from `cloud.aws.proxy_host` to `cloud.aws.proxy.host`
-* from `cloud.aws.ec2.proxy_host` to `cloud.aws.ec2.proxy.host`
-* from `cloud.aws.s3.proxy_host` to `cloud.aws.s3.proxy.host`
-* from `cloud.aws.proxy_port` to `cloud.aws.proxy.port`
-* from `cloud.aws.ec2.proxy_port` to `cloud.aws.ec2.proxy.port`
-* from `cloud.aws.s3.proxy_port` to `cloud.aws.s3.proxy.port`
-
-If you are using proxy settings, update your settings as the deprecated ones will
-be removed in the next major version.
-
-[float]
-=== Multicast plugin deprecated
-
-The `discovery-multicast` plugin has been deprecated in 2.2.0 and has
-been removed in 3.0.0.
diff --git a/docs/reference/migration/migrate_2_3.asciidoc b/docs/reference/migration/migrate_2_3.asciidoc
deleted file mode 100644
index 0d741e2adb2..00000000000
--- a/docs/reference/migration/migrate_2_3.asciidoc
+++ /dev/null
@@ -1,19 +0,0 @@
-[[breaking-changes-2.3]]
-== Breaking changes in 2.3
-
-This section discusses the changes that you need to be aware of when migrating
-your application to Elasticsearch 2.3.
-
-* <>
-
-[[breaking_23_index_apis]]
-=== Mappings
-
-==== Limit to the number of `nested` fields
-
-Indexing a document with 100 nested fields actually indexes 101 documents as each nested
-document is indexed as a separate document. To safeguard against ill-defined mappings,
-the number of nested fields that can be defined per index has been limited to 50.
-This default limit can be changed with the index setting `index.mapping.nested_fields.limit`.
-Note that the limit is only checked when new indices are created or mappings are updated. It
-will thus only affect existing pre-2.3 indices if their mapping is changed.
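As an illustrative sketch, the limit could be raised for a single index at creation time (the index name and value are made up):

[source,js]
---------------
PUT my_index
{
  "settings": {
    "index.mapping.nested_fields.limit": 100
  }
}
---------------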
diff --git a/docs/reference/migration/migrate_5_0.asciidoc b/docs/reference/migration/migrate_5_0.asciidoc
index 066c3a878ac..23cadbbd9ed 100644
--- a/docs/reference/migration/migrate_5_0.asciidoc
+++ b/docs/reference/migration/migrate_5_0.asciidoc
@@ -4,873 +4,52 @@
 This section discusses the changes that you need to be aware of when migrating
 your application to Elasticsearch 5.0.
 
+[IMPORTANT]
+.Reindex indices from Elasticsearch 1.x or before
+=========================================
+
+Indices created in Elasticsearch 1.x or before will need to be reindexed with
+Elasticsearch 2.x in order to be readable by Elasticsearch 5.x. The easiest
+way to do this is to upgrade to Elasticsearch 2.3 or later and to use the
+`reindex` API.
+
+=========================================
+
+[float]
+=== Also see:
+
 * <>
+* <>
+* <>
+* <>
+* <>
+* <>
 * <>
 * <>
-* <>
-* <>
-* <>
-* <>
 * <>
-* <>
-* <>
-* <>
-* <>
-* <>
 * <>
-* <>
-* <>
-* <>
-* <>
+* <>
 
-[[breaking_50_search_changes]]
-=== Warmers
-
-Thanks to several changes like doc values by default or disk-based norms,
-warmers have become quite useless. As a consequence, warmers and the warmer
-API have been removed: it is no longer possible to register queries that
-will run before a new IndexSearcher is published.
-
-Don't worry if you have warmers defined on your indices; they will simply be
-ignored when upgrading to 5.0.
-
-=== Search changes
-
-==== `search_type=count` removed
-
-The `count` search type was deprecated since version 2.0.0 and is now removed.
-In order to get the same benefits, you just need to set the value of the `size`
-parameter to `0`.
-
-For instance, the following request:
-
-[source,sh]
----------------
-GET /my_index/_search?search_type=count
-{
-  "aggs": {
-    "my_terms": {
-      "terms": {
-        "field": "foo"
-      }
-    }
-  }
-}
----------------
-
-can be replaced with:
-
-[source,sh]
----------------
-GET /my_index/_search
-{
-  "size": 0,
-  "aggs": {
-    "my_terms": {
-      "terms": {
-        "field": "foo"
-      }
-    }
-  }
-}
----------------
-
-==== `search_type=scan` removed
-
-The `scan` search type was deprecated since version 2.1.0 and is now removed.
-All benefits from this search type can now be achieved by doing a scroll
-request that sorts documents in `_doc` order, for instance:
-
-[source,sh]
----------------
-GET /my_index/_search?scroll=2m
-{
-  "sort": [
-    "_doc"
-  ]
-}
----------------
-
-Scroll requests sorted by `_doc` have been optimized to more efficiently resume
-from where the previous request stopped, so this will have the same performance
-characteristics as the former `scan` search type.
-
-==== Boost accuracy for queries on `_all`
-
-Per-field boosts on the `_all` field are now compressed into a single byte
-instead of the 4 bytes used previously. While this will make the index more
-space-efficient, it also means that the boosts will be less accurately encoded.
-
-[[breaking_50_rest_api_changes]]
-=== REST API changes
-
-==== id values longer than 512 bytes are rejected
-
-When specifying an `_id` value longer than 512 bytes, the request will be
-rejected.
- -==== search exists api removed - -The search exists api has been removed in favour of using the search api with -`size` set to `0` and `terminate_after` set to `1`. - -==== `/_optimize` endpoint removed - -The deprecated `/_optimize` endpoint has been removed. The `/_forcemerge` -endpoint should be used in lieu of optimize. - -The `GET` HTTP verb for `/_forcemerge` is no longer supported, please use the -`POST` HTTP verb. - -==== Deprecated queries removed - -The following deprecated queries have been removed: - -* `filtered`: use `bool` query instead, which supports `filter` clauses too -* `and`: use `must` clauses in a `bool` query instead -* `or`: use should clauses in a `bool` query instead -* `limit`: use `terminate_after` parameter instead -* `fquery`: obsolete after filters and queries have been merged -* `query`: obsolete after filters and queries have been merged - -==== Unified fuzziness parameter - -* Removed support for the deprecated `min_similarity` parameter in `fuzzy query`, in favour of `similarity`. -* Removed support for the deprecated `fuzzy_min_sim` parameter in `query_string` query, in favour of `similarity`. -* Removed support for the deprecated `edit_distance` parameter in completion suggester, in favour of `similarity`. - -==== indices query - -Removed support for the deprecated `filter` and `no_match_filter` fields in `indices` query, -in favour of `query` and `no_match_query`. - -==== nested query - -Removed support for the deprecated `filter` fields in `nested` query, in favour of `query`. - -==== terms query - -Removed support for the deprecated `minimum_should_match` and `disable_coord` in `terms` query, use `bool` query instead. -Removed also support for the deprecated `execution` parameter. - -==== function_score query - -Removed support for the top level `filter` element in `function_score` query, replaced by `query`. - -==== highlighters - -Removed support for multiple highlighter names, the only supported ones are: `plain`, `fvh` and `postings`. - -==== top level filter - -Removed support for the deprecated top level `filter` in the search api, replaced by `post_filter`. - -==== `query_binary` and `filter_binary` removed - -Removed support for the undocumented `query_binary` and `filter_binary` sections of a search request. - -==== `span_near`'s' `collect_payloads` deprecated - -Payloads are now loaded when needed. - -[[breaking_50_cat_api]] -=== CAT API changes - -==== Use Accept header for specifying response media type - -Previous versions of Elasticsearch accepted the Content-type header -field for controlling the media type of the response in the cat API. -This is in opposition to the HTTP spec which specifies the Accept -header field for this purpose. Elasticsearch now uses the Accept header -field and support for using the Content-Type header field for this -purpose has been removed. - -==== Host field removed from the cat nodes API - -The `host` field has been removed from the cat nodes API as its value -is always equal to the `ip` field. The `name` field is available in the -cat nodes API and should be used instead of the `host` field. - -[[breaking_50_parent_child_changes]] -=== Parent/Child changes - -The `children` aggregation, parent child inner hits and `has_child` and `has_parent` queries will not work on indices -with `_parent` field mapping created before version `2.0.0`. The data of these indices need to be re-indexed into a new index. - -The format of the join between parent and child documents have changed with the `2.0.0` release. 
The old -format can't read from version `5.0.0` and onwards. The new format allows for a much more efficient and -scalable join between parent and child documents and the join data structures are stored on disk -data structures as opposed as before the join data structures were stored in the jvm heap space. - -==== `score_type` has been removed - -The `score_type` option has been removed from the `has_child` and `has_parent` queries in favour of the `score_mode` option -which does the exact same thing. - -==== `sum` score mode removed - -The `sum` score mode has been removed in favour of the `total` mode which does the same and is already available in -previous versions. - -==== `max_children` option - -When `max_children` was set to `0` on the `has_child` query then there was no upper limit on how many children documents -are allowed to match. This has changed and `0` now really means to zero child documents are allowed. If no upper limit -is needed then the `max_children` option shouldn't be defined at all on the `has_child` query. - -==== `_parent` field no longer indexed - -The join between parent and child documents no longer relies on indexed fields and therefor from `5.0.0` onwards -the `_parent` indexed field won't be indexed. In order to find documents that referrer to a specific parent id -the new `parent_id` query can be used. The get response and hits inside the search response remain to include -the parent id under the `_parent` key. - -[[breaking_50_settings_changes]] -=== Settings changes - -From Elasticsearch 5.0 on all settings are validated before they are applied. Node level and default index -level settings are validated on node startup, dynamic cluster and index setting are validated before they are updated/added -to the cluster state. Every setting must be a _known_ setting or in other words all settings must be registered with the -node or transport client they are used with. This implies that plugins that define custom settings must register all of their -settings during pluging loading using the `SettingsModule#registerSettings(Setting)` method. - -==== Node settings - -The `name` setting has been removed and is replaced by `node.name`. Usage of `-Dname=some_node_name` is not supported -anymore. - -==== Transport Settings - -All settings with a `netty` infix have been replaced by their already existing `transport` synonyms. For instance `transport.netty.bind_host` is -no longer supported and should be replaced by the superseding setting `transport.bind_host`. - -==== Analysis settings - -The `index.analysis.analyzer.default_index` analyzer is not supported anymore. -If you wish to change the analyzer to use for indexing, change the -`index.analysis.analyzer.default` analyzer instead. - -==== Ping timeout settings - -Previously, there were three settings for the ping timeout: `discovery.zen.initial_ping_timeout`, -`discovery.zen.ping.timeout` and `discovery.zen.ping_timeout`. The former two have been removed and -the only setting key for the ping timeout is now `discovery.zen.ping_timeout`. The default value for -ping timeouts remains at three seconds. 
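-
-For illustration, the surviving key could be set in `elasticsearch.yml` as
-follows (the `5s` value is just an example, not a new default):
-
-[source,yaml]
------
-discovery.zen.ping_timeout: 5s
------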
- -==== Recovery settings - -Recovery settings deprecated in 1.x have been removed: - - * `index.shard.recovery.translog_size` is superseded by `indices.recovery.translog_size` - * `index.shard.recovery.translog_ops` is superseded by `indices.recovery.translog_ops` - * `index.shard.recovery.file_chunk_size` is superseded by `indices.recovery.file_chunk_size` - * `index.shard.recovery.concurrent_streams` is superseded by `indices.recovery.concurrent_streams` - * `index.shard.recovery.concurrent_small_file_streams` is superseded by `indices.recovery.concurrent_small_file_streams` - * `indices.recovery.max_size_per_sec` is superseded by `indices.recovery.max_bytes_per_sec` - -If you are using any of these settings please take the time and review their purpose. All of the settings above are considered -_expert settings_ and should only be used if absolutely necessary. If you have set any of the above setting as persistent -cluster settings please use the settings update API and set their superseded keys accordingly. - -The following settings have been removed without replacement - - * `indices.recovery.concurrent_small_file_streams` - recoveries are now single threaded. The number of concurrent outgoing recoveries are throttled via allocation deciders - * `indices.recovery.concurrent_file_streams` - recoveries are now single threaded. The number of concurrent outgoing recoveries are throttled via allocation deciders - -==== Translog settings - -The `index.translog.flush_threshold_ops` setting is not supported anymore. In order to control flushes based on the transaction log -growth use `index.translog.flush_threshold_size` instead. Changing the translog type with `index.translog.fs.type` is not supported -anymore, the `buffered` implementation is now the only available option and uses a fixed `8kb` buffer. - -The translog by default is fsynced on a request basis such that the ability to fsync on every operation is not necessary anymore. In-fact it can -be a performance bottleneck and it's trappy since it enabled by a special value set on `index.translog.sync_interval`. `index.translog.sync_interval` -now doesn't accept a value less than `100ms` which prevents fsyncing too often if async durability is enabled. The special value `0` is not supported anymore. - -==== Request Cache Settings - -The deprecated settings `index.cache.query.enable` and `indices.cache.query.size` have been removed and are replaced with -`index.requests.cache.enable` and `indices.requests.cache.size` respectively. - -`indices.requests.cache.clean_interval` has been replaced with `indices.cache.clean_interval` and is no longer supported. - -==== Field Data Cache Settings - -`indices.fielddata.cache.clean_interval` has been replaced with `indices.cache.clean_interval` and is no longer supported. - -==== Allocation settings - -Allocation settings deprecated in 1.x have been removed: - - * `cluster.routing.allocation.concurrent_recoveries` is superseded by `cluster.routing.allocation.node_concurrent_recoveries` - -Please change the setting in your configuration files or in the clusterstate to use the new settings instead. - -==== Similarity settings - -The 'default' similarity has been renamed to 'classic'. 
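-
-As a sketch of what this rename means for mappings (the index, type, and field
-names here are hypothetical), a field that should keep the old TF/IDF
-behaviour would now reference `classic` explicitly:
-
-[source,sh]
----------------
-PUT /my_index
-{
-  "mappings": {
-    "my_type": {
-      "properties": {
-        "my_field": {
-          "type": "text",
-          "similarity": "classic"
-        }
-      }
-    }
-  }
-}
----------------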
- -==== Indexing settings - -`indices.memory.min_shard_index_buffer_size` and `indices.memory.max_shard_index_buffer_size` are removed since Elasticsearch now allows any one shard to any -amount of heap as long as the total indexing buffer heap used across all shards is below the node's `indices.memory.index_buffer_size` (default: 10% of the JVM heap) - -==== Removed es.max-open-files - -Setting the system property es.max-open-files to true to get -Elasticsearch to print the number of maximum open files for the -Elasticsearch process has been removed. This same information can be -obtained from the <> API, and a warning is logged -on startup if it is set too low. - -==== Removed es.netty.gathering - -Disabling Netty from using NIO gathering could be done via the escape -hatch of setting the system property "es.netty.gathering" to "false". -Time has proven enabling gathering by default is a non-issue and this -non-documented setting has been removed. - -==== Removed es.useLinkedTransferQueue - -The system property `es.useLinkedTransferQueue` could be used to -control the queue implementation used in the cluster service and the -handling of ping responses during discovery. This was an undocumented -setting and has been removed. - -[[breaking_50_mapping_changes]] -=== Mapping changes - -==== Default doc values settings - -Doc values are now also on by default on numeric and boolean fields that are -not indexed. - -==== Transform removed - -The `transform` feature from mappings has been removed. It made issues very hard to debug. - -==== Default number mappings - -When a floating-point number is encountered, it is now dynamically mapped as a -float by default instead of a double. The reasoning is that floats should be -more than enough for most cases but would decrease storage requirements -significantly. - -==== `index` property - -On all types but `string`, the `index` property now only accepts `true`/`false` -instead of `not_analyzed`/`no`. The `string` field still accepts -`analyzed`/`not_analyzed`/`no`. - -==== ++_source++'s `format` option - -The `_source` mapping does not support the `format` option anymore. This option -will still be accepted for indices created before the upgrade to 5.0 for backward -compatibility, but it will have no effect. Indices created on or after 5.0 will -reject this option. - -==== Object notation - -Core types don't support the object notation anymore, which allowed to provide -values as follows: - -[source,json] ---------------- -{ - "value": "field_value", - "boost": 42 -} ---------------- - -==== `fielddata.format` - -Setting `fielddata.format: doc_values` in the mappings used to implicitly -enable doc values on a field. This no longer works: the only way to enable or -disable doc values is by using the `doc_values` property of mappings. - - -[[breaking_50_plugins]] -=== Plugin changes - -The command `bin/plugin` has been renamed to `bin/elasticsearch-plugin`. -The structure of the plugin has changed. All the plugin files must be contained in a directory called `elasticsearch`. -If you use the gradle build, this structure is automatically generated. - -==== Site plugins removed - -Site plugins have been removed. It is recommended to migrate site plugins to Kibana plugins. - -==== Multicast plugin removed - -Multicast has been removed. Use unicast discovery, or one of the cloud discovery plugins. 
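-
-As a minimal sketch of the unicast alternative (the host names are
-placeholders), `elasticsearch.yml` might contain:
-
-[source,yaml]
------
-discovery.zen.ping.unicast.hosts: ["host1:9300", "host2:9300"]
------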
- -==== Plugins with custom query implementations - -Plugins implementing custom queries need to implement the `fromXContent(QueryParseContext)` method in their -`QueryParser` subclass rather than `parse`. This method will take care of parsing the query from `XContent` format -into an intermediate query representation that can be streamed between the nodes in binary format, effectively the -query object used in the java api. Also, the query parser needs to implement the `getBuilderPrototype` method that -returns a prototype of the `NamedWriteable` query, which allows to deserialize an incoming query by calling -`readFrom(StreamInput)` against it, which will create a new object, see usages of `Writeable`. The `QueryParser` -also needs to declare the generic type of the query that it supports and it's able to parse. -The query object can then transform itself into a lucene query through the new `toQuery(QueryShardContext)` method, -which returns a lucene query to be executed on the data node. - -Similarly, plugins implementing custom score functions need to implement the `fromXContent(QueryParseContext)` -method in their `ScoreFunctionParser` subclass rather than `parse`. This method will take care of parsing -the function from `XContent` format into an intermediate function representation that can be streamed between -the nodes in binary format, effectively the function object used in the java api. Also, the query parser needs -to implement the `getBuilderPrototype` method that returns a prototype of the `NamedWriteable` function, which -allows to deserialize an incoming function by calling `readFrom(StreamInput)` against it, which will create a -new object, see usages of `Writeable`. The `ScoreFunctionParser` also needs to declare the generic type of the -function that it supports and it's able to parse. The function object can then transform itself into a lucene -function through the new `toFunction(QueryShardContext)` method, which returns a lucene function to be executed -on the data node. - -==== Cloud AWS plugin changes - -Cloud AWS plugin has been split in two plugins: - -* {plugins}/discovery-ec2.html[Discovery EC2 plugin] -* {plugins}/repository-s3.html[Repository S3 plugin] - -Proxy settings for both plugins have been renamed: - -* from `cloud.aws.proxy_host` to `cloud.aws.proxy.host` -* from `cloud.aws.ec2.proxy_host` to `cloud.aws.ec2.proxy.host` -* from `cloud.aws.s3.proxy_host` to `cloud.aws.s3.proxy.host` -* from `cloud.aws.proxy_port` to `cloud.aws.proxy.port` -* from `cloud.aws.ec2.proxy_port` to `cloud.aws.ec2.proxy.port` -* from `cloud.aws.s3.proxy_port` to `cloud.aws.s3.proxy.port` - -==== Cloud Azure plugin changes - -Cloud Azure plugin has been split in three plugins: - -* {plugins}/discovery-azure.html[Discovery Azure plugin] -* {plugins}/repository-azure.html[Repository Azure plugin] -* {plugins}/store-smb.html[Store SMB plugin] - -If you were using the `cloud-azure` plugin for snapshot and restore, you had in `elasticsearch.yml`: - -[source,yaml] ------ -cloud: - azure: - storage: - account: your_azure_storage_account - key: your_azure_storage_key ------ - -You need to give a unique id to the storage details now as you can define multiple storage accounts: - -[source,yaml] ------ -cloud: - azure: - storage: - my_account: - account: your_azure_storage_account - key: your_azure_storage_key ------ - - -==== Cloud GCE plugin changes - -Cloud GCE plugin has been renamed to {plugins}/discovery-gce.html[Discovery GCE plugin]. 
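-
-A minimal discovery configuration for the renamed plugin might look as follows
-(the project id and zone are placeholders):
-
-[source,yaml]
------
-cloud:
-  gce:
-    project_id: my-project-id
-    zone: europe-west1-a
-discovery:
-  type: gce
------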
- - -==== Mapper Attachments plugin deprecated - -Mapper attachments has been deprecated. Users should use now the {plugins}/ingest-attachment.html[`ingest-attachment`] -plugin. - - -[[breaking_50_java_api_changes]] -=== Java API changes - -==== Count api has been removed - -The deprecated count api has been removed from the Java api, use the search api instead and set size to 0. - -The following call - -[source,java] ------ -client.prepareCount(indices).setQuery(query).get(); ------ - -can be replaced with - -[source,java] ------ -client.prepareSearch(indices).setSource(new SearchSourceBuilder().size(0).query(query)).get(); ------ - -==== BoostingQueryBuilder - -Removed setters for mandatory positive/negative query. Both arguments now have -to be supplied at construction time already and have to be non-null. - -==== SpanContainingQueryBuilder - -Removed setters for mandatory big/little inner span queries. Both arguments now have -to be supplied at construction time already and have to be non-null. Updated -static factory methods in QueryBuilders accordingly. - -==== SpanOrQueryBuilder - -Making sure that query contains at least one clause by making initial clause mandatory -in constructor. - -==== SpanNearQueryBuilder - -Removed setter for mandatory slop parameter, needs to be set in constructor now. Also -making sure that query contains at least one clause by making initial clause mandatory -in constructor. Updated the static factory methods in QueryBuilders accordingly. - -==== SpanNotQueryBuilder - -Removed setter for mandatory include/exclude span query clause, needs to be set in constructor now. -Updated the static factory methods in QueryBuilders and tests accordingly. - -==== SpanWithinQueryBuilder - -Removed setters for mandatory big/little inner span queries. Both arguments now have -to be supplied at construction time already and have to be non-null. Updated -static factory methods in QueryBuilders accordingly. - -==== QueryFilterBuilder - -Removed the setter `queryName(String queryName)` since this field is not supported -in this type of query. Use `FQueryFilterBuilder.queryName(String queryName)` instead -when in need to wrap a named query as a filter. - -==== WrapperQueryBuilder - -Removed `wrapperQueryBuilder(byte[] source, int offset, int length)`. Instead simply -use `wrapperQueryBuilder(byte[] source)`. Updated the static factory methods in -QueryBuilders accordingly. - -==== QueryStringQueryBuilder - -Removed ability to pass in boost value using `field(String field)` method in form e.g. `field^2`. -Use the `field(String, float)` method instead. - -==== Operator - -Removed the enums called `Operator` from `MatchQueryBuilder`, `QueryStringQueryBuilder`, -`SimpleQueryStringBuilder`, and `CommonTermsQueryBuilder` in favour of using the enum -defined in `org.elasticsearch.index.query.Operator` in an effort to consolidate the -codebase and avoid duplication. - -==== queryName and boost support - -Support for `queryName` and `boost` has been streamlined to all of the queries. That is -a breaking change till queries get sent over the network as serialized json rather -than in `Streamable` format. In fact whenever additional fields are added to the json -representation of the query, older nodes might throw error when they find unknown fields. - -==== InnerHitsBuilder - -InnerHitsBuilder now has a dedicated addParentChildInnerHits and addNestedInnerHits methods -to differentiate between inner hits for nested vs. parent / child documents. 
This change -makes the type / path parameter mandatory. - -==== MatchQueryBuilder - -Moving MatchQueryBuilder.Type and MatchQueryBuilder.ZeroTermsQuery enum to MatchQuery.Type. -Also reusing new Operator enum. - -==== MoreLikeThisQueryBuilder - -Removed `MoreLikeThisQueryBuilder.Item#id(String id)`, `Item#doc(BytesReference doc)`, -`Item#doc(XContentBuilder doc)`. Use provided constructors instead. - -Removed `MoreLikeThisQueryBuilder#addLike` in favor of texts and/or items being provided -at construction time. Using arrays there instead of lists now. - -Removed `MoreLikeThisQueryBuilder#addUnlike` in favor to using the `unlike` methods -which take arrays as arguments now rather than the lists used before. - -The deprecated `docs(Item... docs)`, `ignoreLike(Item... docs)`, -`ignoreLike(String... likeText)`, `addItem(Item... likeItems)` have been removed. - -==== GeoDistanceQueryBuilder - -Removing individual setters for lon() and lat() values, both values should be set together - using point(lon, lat). - -==== GeoDistanceRangeQueryBuilder - -Removing setters for to(Object ...) and from(Object ...) in favour of the only two allowed input -arguments (String, Number). Removing setter for center point (point(), geohash()) because parameter -is mandatory and should already be set in constructor. -Also removing setters for lt(), lte(), gt(), gte() since they can all be replaced by equivalent -calls to to/from() and inludeLower()/includeUpper(). - -==== GeoPolygonQueryBuilder - -Require shell of polygon already to be specified in constructor instead of adding it pointwise. -This enables validation, but makes it necessary to remove the addPoint() methods. - -==== MultiMatchQueryBuilder - -Moving MultiMatchQueryBuilder.ZeroTermsQuery enum to MatchQuery.ZeroTermsQuery. -Also reusing new Operator enum. - -Removed ability to pass in boost value using `field(String field)` method in form e.g. `field^2`. -Use the `field(String, float)` method instead. - -==== MissingQueryBuilder - -The MissingQueryBuilder which was deprecated in 2.2.0 is removed. As a replacement use ExistsQueryBuilder -inside a mustNot() clause. So instead of using `new ExistsQueryBuilder(name)` now use -`new BoolQueryBuilder().mustNot(new ExistsQueryBuilder(name))`. - -==== NotQueryBuilder - -The NotQueryBuilder which was deprecated in 2.1.0 is removed. As a replacement use BoolQueryBuilder -with added mustNot() clause. So instead of using `new NotQueryBuilder(filter)` now use -`new BoolQueryBuilder().mustNot(filter)`. - -==== TermsQueryBuilder - -Remove the setter for `termsLookup()`, making it only possible to either use a TermsLookup object or -individual values at construction time. Also moving individual settings for the TermsLookup (lookupIndex, -lookupType, lookupId, lookupPath) to the separate TermsLookup class, using constructor only and moving -checks for validation there. Removed `TermsLookupQueryBuilder` in favour of `TermsQueryBuilder`. - -==== FunctionScoreQueryBuilder - -`add` methods have been removed, all filters and functions must be provided as constructor arguments by -creating an array of `FunctionScoreQueryBuilder.FilterFunctionBuilder` objects, containing one element -for each filter/function pair. - -`scoreMode` and `boostMode` can only be provided using corresponding enum members instead -of string values: see `FilterFunctionScoreQuery.ScoreMode` and `CombineFunction`. - -`CombineFunction.MULT` has been renamed to `MULTIPLY`. 
- -==== IdsQueryBuilder - -For simplicity, only one way of adding the ids to the existing list (empty by default) is left: `addIds(String...)` - -==== DocumentAlreadyExistsException removed - -`DocumentAlreadyExistsException` is removed and a `VersionConflictException` is thrown instead (with a better -error description). This will influence code that use the `IndexRequest.opType()` or `IndexRequest.create()` -to index a document only if it doesn't already exist. - -==== ShapeBuilders - -`InternalLineStringBuilder` is removed in favour of `LineStringBuilder`, `InternalPolygonBuilder` in favour of PolygonBuilder` and `Ring` has been replaced with `LineStringBuilder`. Also the abstract base classes `BaseLineStringBuilder` and `BasePolygonBuilder` haven been merged with their corresponding implementations. - -==== RescoreBuilder - -`RecoreBuilder.Rescorer` was merged with `RescoreBuilder`, which now is an abstract superclass. QueryRescoreBuilder currently is its only implementation. - -==== PhraseSuggestionBuilder - -The inner DirectCandidateGenerator class has been moved out to its own class called DirectCandidateGeneratorBuilder. - -==== SuggestBuilder - -The `setText` method has been changed to `setGlobalText` to make the intent more clear, and a `getGlobalText` method has been added. - -The `addSuggestion` method now required the user specified suggestion name, previously used in the ctor of each -suggestion. - -=== SuggestionBuilder - -The `field` setter has been deleted. Instead the field name needs to be specified as constructor argument. - -==== Elasticsearch will no longer detect logging implementations - -Elasticsearch now logs only to log4j 1.2. Previously if log4j wasn't on the classpath it made some effort to degrade to -slf4j or java.util.logging. Now it'll fail to work without the log4j 1.2 api. The log4j-over-slf4j bridge ought to work -when using the java client. As should log4j 2's log4j-1.2-api. The Elasticsearch server now only supports log4j as -configured by logging.yml and it no longer makes any effort to work if log4j isn't present. - -[[breaking_50_cache_concurrency]] -=== Cache concurrency level settings removed - -Two cache concurrency level settings `indices.requests.cache.concurrency_level` and -`indices.fielddata.cache.concurrency_level` because they no longer apply to the cache implementation used for the -request cache and the field data cache. - -[[breaking_50_non_loopback]] -=== Remove bind option of `non_loopback` - -This setting would arbitrarily pick the first interface not marked as loopback. Instead, specify by address -scope (e.g. `_local_,_site_` for all loopback and private network addresses) or by explicit interface names, -hostnames, or addresses. - -[[breaking_50_thread_pool]] -=== Forbid changing of thread pool types - -Previously, <> could be dynamically adjusted. The thread pool type effectively -controls the backing queue for the thread pool and modifying this is an expert setting with minimal practical benefits -and high risk of being misused. The ability to change the thread pool type for any thread pool has been removed; do note -that it is still possible to adjust relevant thread pool parameters for each of the thread pools (e.g., depending on -the thread pool type, `keep_alive`, `queue_size`, etc.). - -[[breaking_50_cpu_stats]] -=== System CPU stats - -The recent CPU usage (as a percent) has been added to the OS stats -reported under the node stats API and the cat nodes API. 
The breaking -change here is that there is a new object in the `os` object in the node -stats response. This object is called `cpu` and includes "percent" and -`load_average` as fields. This moves the `load_average` field that was -previously a top-level field in the `os` object to the `cpu` object. The -format of the `load_average` field has changed to an object with fields -`1m`, `5m`, and `15m` representing the one-minute, five-minute and -fifteen-minute loads respectively. If any of these fields are not present, -it indicates that the corresponding value is not available. - -In the cat nodes API response, the `cpu` field is output by default. The -previous `load` field has been removed and is replaced by `load_1m`, -`load_5m`, and `load_15m` which represent the one-minute, five-minute -and fifteen-minute loads respectively. The field will be null if the -corresponding value is not available. - -Finally, the API for `org.elasticsearch.monitor.os.OsStats` has -changed. The `getLoadAverage` method has been removed. The value for -this can now be obtained from `OsStats.Cpu#getLoadAverage` but it is no -longer a double and is instead an object encapsulating the one-minute, -five-minute and fifteen-minute load averages. Additionally, the recent -CPU usage can be obtained from `OsStats.Cpu#getPercent`. - -=== Fields option -Only stored fields are retrievable with this option. -The fields option won't be able to load non stored fields from _source anymore. - -[[breaking_50_allocation]] -=== Primary shard allocation - -Previously, primary shards were only assigned if a quorum of shard copies were found (configurable using -`index.recovery.initial_shards`, now deprecated). In case where a primary had only a single replica, quorum was defined -to be a single shard. This meant that any shard copy of an index with replication factor 1 could become primary, even it -was a stale copy of the data on disk. This is now fixed by using allocation IDs. - -Allocation IDs assign unique identifiers to shard copies. This allows the cluster to differentiate between multiple -copies of the same data and track which shards have been active, so that after a cluster restart, shard copies -containing only the most recent data can become primaries. - -=== Indices Shard Stores command - -By using allocation IDs instead of version numbers to identify shard copies for primary shard allocation, the former versioning scheme -has become obsolete. This is reflected in the indices-shards-stores.html[Indices Shard Stores API]. A new field `allocation_id` replaces the -former `version` field in the result of the Indices Shard Stores command. This field is available for all shard copies that have been either -created with the current version of Elasticsearch or have been active in a cluster running a current version of Elasticsearch. For legacy -shard copies that have not been active in a current version of Elasticsearch, a `legacy_version` field is available instead (equivalent to -the former `version` field). - -=== Reroute commands - -The reroute command `allocate` has been split into two distinct commands `allocate_replica` and `allocate_empty_primary`. -This was done as we introduced a new `allocate_stale_primary` command. The new `allocate_replica` command corresponds to the -old `allocate` command with `allow_primary` set to false. The new `allocate_empty_primary` command corresponds to the old -`allocate` command with `allow_primary` set to true. 
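-
-For illustration, a reroute that previously used `allocate` with
-`allow_primary` set to false would now be expressed as follows (the index,
-shard, and node values are hypothetical):
-
-[source,sh]
----------------
-POST /_cluster/reroute
-{
-  "commands": [
-    {
-      "allocate_replica": {
-        "index": "my_index",
-        "shard": 0,
-        "node": "node-1"
-      }
-    }
-  ]
-}
----------------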
- -==== `index.shared_filesystem.recover_on_any_node` changes - -The behavior of `index.shared_filesystem.recover_on_any_node = true` has been changed. Previously, in the case where no -shard copies could be found, an arbitrary node was chosen by potentially ignoring allocation deciders. Now, we take -balancing into account but don't assign the shard if the allocation deciders are not satisfied. The behavior has also changed -in the case where shard copies can be found. Previously, a node not holding the shard copy was chosen if none of the nodes -holding shard copies were satisfying the allocation deciders. Now, the shard will be assigned to a node having a shard copy, -even if none of the nodes holding a shard copy satisfy the allocation deciders. - -[[breaking_50_percolator]] -=== Percolator - -Adding percolator queries and modifications to existing percolator queries are no longer visible in immediately -to the percolator. A refresh is required to run before the changes are visible to the percolator. - -The reason that this has changed is that on newly created indices the percolator automatically indexes the query terms -and these query terms are used at percolate time to reduce the amount of queries the percolate API needs evaluate. -This optimization didn't work in the percolate API mode where modifications to queries are immediately visible. - -The percolator by defaults sets the `size` option to `10` whereas before this was set to unlimited. - -The percolate api can no longer accept documents that have fields that don't exist in the mapping. - -When percolating an existing document then specifying a document in the source of the percolate request is not allowed -any more. - -The percolate api no longer modifies the mappings. Before the percolate api could be used to dynamically introduce new -fields to the mappings based on the fields in the document being percolated. This no longer works, because these -unmapped fields are not persisted in the mapping. - -Percolator documents are no longer excluded from the search response. - -[[breaking_50_packaging]] -=== Packaging - -==== Default logging using systemd (since Elasticsearch 2.2.0) - -In previous versions of Elasticsearch, the default logging -configuration routed standard output to /dev/null and standard error to -the journal. However, there are often critical error messages at -startup that are logged to standard output rather than standard error -and these error messages would be lost to the nether. The default has -changed to now route standard output to the journal and standard error -to inherit this setting (these are the defaults for systemd). These -settings can be modified by editing the elasticsearch.service file. - -==== Longer startup times - -In Elasticsearch 5.0.0 the `-XX:+AlwaysPreTouch` flag has been added to the JVM -startup options. This option touches all memory pages used by the JVM heap -during initialization of the HotSpot VM to reduce the chance of having to commit -a memory page during GC time. This will increase the startup time of -Elasticsearch as well as increasing the initial resident memory usage of the -Java process. - -[[breaking_50_scripting]] -=== Scripting - -==== Script mode settings - -Previously script mode settings (e.g., "script.inline: true", -"script.engine.groovy.inline.aggs: false", etc.) accepted the values -`on`, `true`, `1`, and `yes` for enabling a scripting mode, and the -values `off`, `false`, `0`, and `no` for disabling a scripting mode. 
-The variants `on`, `1`, and `yes` for enabling and `off`, `0`,
-and `no` for disabling are no longer supported.
-
-==== Groovy dependencies
-
-In previous versions of Elasticsearch, the Groovy scripting capabilities
-depended on the `org.codehaus.groovy:groovy-all` artifact. In addition
-to pulling in the Groovy language, this pulls in a very large set of
-functionality, none of which is needed for scripting within
-Elasticsearch. Aside from the inherent difficulties in managing such a
-large set of dependencies, this also increases the surface area for
-security issues. This dependency has been reduced to the core Groovy
-language `org.codehaus.groovy:groovy` artifact.
-
-[[breaking_50_term_vectors]]
-=== Term vectors
-
-The term vectors APIs no longer persist unmapped fields in the mappings.
-
-The `dfs` parameter has been removed completely; term vectors don't support
-distributed document frequencies anymore.
-
-[[breaking_50_security]]
-=== Security
-
-The option to disable the security manager `--security.manager.enabled` has been removed. In order to grant special
-permissions to Elasticsearch, users must tweak the local Java Security Policy.
-
-[[breaking_50_snapshot_restore]]
-=== Snapshot/Restore
-
-==== Closing / deleting indices while running snapshot
-
-In previous versions of Elasticsearch, closing or deleting an index during a full snapshot would make the snapshot fail. This is now changed
-by failing the close/delete index request instead. The behavior for partial snapshots remains unchanged: Closing or deleting an index during
-a partial snapshot is still possible. The snapshot result is then marked as partial.
diff --git a/docs/reference/migration/migrate_5_0/allocation.asciidoc b/docs/reference/migration/migrate_5_0/allocation.asciidoc
new file mode 100644
index 00000000000..1e095831381
--- /dev/null
+++ b/docs/reference/migration/migrate_5_0/allocation.asciidoc
@@ -0,0 +1,54 @@
+[[breaking_50_allocation]]
+=== Allocation changes
+
+==== Primary shard allocation
+
+Previously, primary shards were only assigned if a quorum of shard copies were
+found (configurable using `index.recovery.initial_shards`, now deprecated). In
+the case where a primary had only a single replica, quorum was defined to be a
+single shard. This meant that any shard copy of an index with replication
+factor 1 could become primary, even if it was a stale copy of the data on
+disk. This is now fixed thanks to shard allocation IDs.
+
+Allocation IDs assign unique identifiers to shard copies. This allows the
+cluster to differentiate between multiple copies of the same data and track
+which shards have been active so that, after a cluster restart, only shard
+copies containing the most recent data can become primaries.
+
+==== Indices Shard Stores command
+
+By using allocation IDs instead of version numbers to identify shard copies
+for primary shard allocation, the former versioning scheme has become
+obsolete. This is reflected in the
+<>.
+
+A new `allocation_id` field replaces the former `version` field in the result
+of the Indices Shard Stores command. This field is available for all shard
+copies that have been either created with the current version of Elasticsearch
+or have been active in a cluster running a current version of Elasticsearch.
+For legacy shard copies that have not been active in a current version of
+Elasticsearch, a `legacy_version` field is available instead (equivalent to
+the former `version` field).
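+
+As a quick illustration (the index name is a placeholder), the new fields can
+be inspected through the shard stores API:
+
+[source,sh]
+---------------
+GET /my_index/_shard_stores?status=green
+---------------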
+ +==== Reroute commands + +The reroute command `allocate` has been split into two distinct commands +`allocate_replica` and `allocate_empty_primary`. This was done as we +introduced a new `allocate_stale_primary` command. The new `allocate_replica` +command corresponds to the old `allocate` command with `allow_primary` set to +false. The new `allocate_empty_primary` command corresponds to the old +`allocate` command with `allow_primary` set to true. + +==== `index.shared_filesystem.recover_on_any_node` changes + +The behavior of `index.shared_filesystem.recover_on_any_node: true` has been +changed. Previously, in the case where no shard copies could be found, an +arbitrary node was chosen by potentially ignoring allocation deciders. Now, we +take balancing into account but don't assign the shard if the allocation +deciders are not satisfied. + +The behavior has also changed in the case where shard copies can be found. +Previously, a node not holding the shard copy was chosen if none of the nodes +holding shard copies were satisfying the allocation deciders. Now, the shard +will be assigned to a node having a shard copy, even if none of the nodes +holding a shard copy satisfy the allocation deciders. diff --git a/docs/reference/migration/migrate_5_0/cat.asciidoc b/docs/reference/migration/migrate_5_0/cat.asciidoc new file mode 100644 index 00000000000..c3b1c84ee8d --- /dev/null +++ b/docs/reference/migration/migrate_5_0/cat.asciidoc @@ -0,0 +1,33 @@ +[[breaking_50_cat_api]] +=== CAT API changes + +==== Use Accept header for specifying response media type + +Previous versions of Elasticsearch accepted the Content-type header +field for controlling the media type of the response in the cat API. +This is in opposition to the HTTP spec which specifies the Accept +header field for this purpose. Elasticsearch now uses the Accept header +field and support for using the Content-Type header field for this +purpose has been removed. + +==== Host field removed from the cat nodes API + +The `host` field has been removed from the cat nodes API as its value +is always equal to the `ip` field. The `name` field is available in the +cat nodes API and should be used instead of the `host` field. + +==== Changes to cat recovery API + +The fields `bytes_recovered` and `files_recovered` have been added to +the cat recovery API. These fields, respectively, indicate the total +number of bytes and files that have been recovered. + +The fields `total_files` and `total_bytes` have been renamed to +`files_total` and `bytes_total`, respectively. + +Additionally, the field `translog` has been renamed to +`translog_ops_recovered`, the field `translog_total` to +`translog_ops` and the field `translog_percent` to +`translog_ops_percent`. The short aliases for these fields are `tor`, +`to`, and `top`, respectively. + diff --git a/docs/reference/migration/migrate_5_0/index-apis.asciidoc b/docs/reference/migration/migrate_5_0/index-apis.asciidoc new file mode 100644 index 00000000000..72651295bbc --- /dev/null +++ b/docs/reference/migration/migrate_5_0/index-apis.asciidoc @@ -0,0 +1,48 @@ +[[breaking_50_index_apis]] +=== Index APIs changes + +==== Closing / deleting indices while running snapshot + +In previous versions of Elasticsearch, closing or deleting an index during a +full snapshot would make the snapshot fail. In 5.0, the close/delete index +request will fail instead. The behavior for partial snapshots remains +unchanged: Closing or deleting an index during a partial snapshot is still +possible. 
The snapshot result is then marked as partial.
+
+==== Warmers
+
+Thanks to several changes like doc values by default and disk-based norms,
+warmers are no longer useful. As a consequence, warmers and the warmer API
+have been removed: it is no longer possible to register queries that will run
+before a new IndexSearcher is published.
+
+Don't worry if you have warmers defined on your indices: they will simply be
+ignored when upgrading to 5.0.
+
+==== System CPU stats
+
+The recent CPU usage (as a percent) has been added to the OS stats
+reported under the node stats API and the cat nodes API. The breaking
+change here is that there is a new object in the `os` object in the node
+stats response. This object is called `cpu` and includes `percent` and
+`load_average` as fields. This moves the `load_average` field that was
+previously a top-level field in the `os` object to the `cpu` object. The
+format of the `load_average` field has changed to an object with fields
+`1m`, `5m`, and `15m` representing the one-minute, five-minute and
+fifteen-minute loads respectively. If any of these fields are not present,
+it indicates that the corresponding value is not available.
+
+In the cat nodes API response, the `cpu` field is output by default. The
+previous `load` field has been removed and is replaced by `load_1m`,
+`load_5m`, and `load_15m` which represent the one-minute, five-minute
+and fifteen-minute loads respectively. The field will be null if the
+corresponding value is not available.
+
+Finally, the API for `org.elasticsearch.monitor.os.OsStats` has
+changed. The `getLoadAverage` method has been removed. The value for
+this can now be obtained from `OsStats.Cpu#getLoadAverage` but it is no
+longer a double and is instead an object encapsulating the one-minute,
+five-minute and fifteen-minute load averages. Additionally, the recent
+CPU usage can be obtained from `OsStats.Cpu#getPercent`.
+
+
diff --git a/docs/reference/migration/migrate_5_0/java.asciidoc b/docs/reference/migration/migrate_5_0/java.asciidoc
new file mode 100644
index 00000000000..f9a5a171370
--- /dev/null
+++ b/docs/reference/migration/migrate_5_0/java.asciidoc
@@ -0,0 +1,222 @@
+
+
+
+[[breaking_50_java_api_changes]]
+=== Java API changes
+
+==== Count api has been removed
+
+The deprecated count api has been removed from the Java api; use the search api instead and set size to 0.
+
+The following call
+
+[source,java]
+-----
+client.prepareCount(indices).setQuery(query).get();
+-----
+
+can be replaced with
+
+[source,java]
+-----
+client.prepareSearch(indices).setSource(new SearchSourceBuilder().size(0).query(query)).get();
+-----
+
+==== Elasticsearch will no longer detect logging implementations
+
+Elasticsearch now logs only to log4j 1.2. Previously if log4j wasn't on the
+classpath it made some effort to degrade to slf4j or java.util.logging. Now it
+will fail to work without the log4j 1.2 api. The log4j-over-slf4j bridge ought
+to work when using the java client, as should log4j 2's log4j-1.2-api. The
+Elasticsearch server now only supports log4j as configured by `logging.yml`
+and will fail if log4j isn't present.
+
+==== Groovy dependencies
+
+In previous versions of Elasticsearch, the Groovy scripting capabilities
+depended on the `org.codehaus.groovy:groovy-all` artifact. In addition
+to pulling in the Groovy language, this pulls in a very large set of
+functionality, none of which is needed for scripting within
+Elasticsearch.
Aside from the inherent difficulties in managing such a
+large set of dependencies, this also increases the surface area for
+security issues. This dependency has been reduced to the core Groovy
+language `org.codehaus.groovy:groovy` artifact.
+
+==== DocumentAlreadyExistsException removed
+
+`DocumentAlreadyExistsException` is removed and a `VersionConflictException` is thrown instead (with a better
+error description). This will influence code that uses `IndexRequest.opType()` or `IndexRequest.create()`
+to index a document only if it doesn't already exist.
+
+==== Changes to Query Builders
+
+===== BoostingQueryBuilder
+
+Removed the setters for the mandatory positive/negative queries. Both arguments now have
+to be supplied at construction time and have to be non-null.
+
+===== SpanContainingQueryBuilder
+
+Removed the setters for the mandatory big/little inner span queries. Both arguments now have
+to be supplied at construction time and have to be non-null. Updated the
+static factory methods in QueryBuilders accordingly.
+
+===== SpanOrQueryBuilder
+
+The query must now contain at least one clause; this is enforced by making the
+initial clause mandatory in the constructor.
+
+===== SpanNearQueryBuilder
+
+Removed the setter for the mandatory slop parameter; it now needs to be set in the
+constructor. The query must also contain at least one clause, so the initial clause
+is mandatory in the constructor. Updated the static factory methods in
+QueryBuilders accordingly.
+
+===== SpanNotQueryBuilder
+
+Removed the setters for the mandatory include/exclude span query clauses; they now need
+to be set in the constructor. Updated the static factory methods in QueryBuilders and
+tests accordingly.
+
+===== SpanWithinQueryBuilder
+
+Removed the setters for the mandatory big/little inner span queries. Both arguments now have
+to be supplied at construction time and have to be non-null. Updated the
+static factory methods in QueryBuilders accordingly.
+
+===== QueryFilterBuilder
+
+Removed the setter `queryName(String queryName)` since this field is not supported
+in this type of query. Use `FQueryFilterBuilder.queryName(String queryName)` instead
+when you need to wrap a named query as a filter.
+
+===== WrapperQueryBuilder
+
+Removed `wrapperQueryBuilder(byte[] source, int offset, int length)`. Instead simply
+use `wrapperQueryBuilder(byte[] source)`. Updated the static factory methods in
+QueryBuilders accordingly.
+
+===== QueryStringQueryBuilder
+
+Removed the ability to pass in a boost value using the `field(String field)` method
+in the form `field^2`. Use the `field(String, float)` method instead.
+
+===== Operator
+
+Removed the enums called `Operator` from `MatchQueryBuilder`, `QueryStringQueryBuilder`,
+`SimpleQueryStringBuilder`, and `CommonTermsQueryBuilder` in favour of using the enum
+defined in `org.elasticsearch.index.query.Operator` in an effort to consolidate the
+codebase and avoid duplication.
+
+===== queryName and boost support
+
+Support for `queryName` and `boost` has been streamlined to all of the queries. That is
+a breaking change until queries are sent over the network as serialized json rather
+than in `Streamable` format. In fact, whenever additional fields are added to the json
+representation of the query, older nodes might throw errors when they find unknown fields.
+
+===== InnerHitsBuilder
+
+InnerHitsBuilder now has dedicated addParentChildInnerHits and addNestedInnerHits methods
+to differentiate between inner hits for nested vs. parent / child documents. This change
+makes the type / path parameter mandatory.
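+
+The following sketch illustrates the constructor-centric pattern described
+above for span queries; the field name, terms, and slop value are hypothetical:
+
+[source,java]
+-----
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.SpanNearQueryBuilder;
+
+// The initial clause and the slop are now mandatory constructor arguments;
+// additional clauses are appended with addClause().
+SpanNearQueryBuilder query = QueryBuilders
+        .spanNearQuery(QueryBuilders.spanTermQuery("body", "quick"), 2)
+        .addClause(QueryBuilders.spanTermQuery("body", "fox"));
+-----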
+
+===== MatchQueryBuilder
+
+Moved the MatchQueryBuilder.Type and MatchQueryBuilder.ZeroTermsQuery enums to MatchQuery.Type.
+Also reusing the new Operator enum.
+
+===== MoreLikeThisQueryBuilder
+
+Removed `MoreLikeThisQueryBuilder.Item#id(String id)`, `Item#doc(BytesReference doc)`,
+`Item#doc(XContentBuilder doc)`. Use the provided constructors instead.
+
+Removed `MoreLikeThisQueryBuilder#addLike` in favor of texts and/or items being provided
+at construction time. Arrays are used there now instead of lists.
+
+Removed `MoreLikeThisQueryBuilder#addUnlike` in favor of using the `unlike` methods,
+which now take arrays as arguments rather than the lists used before.
+
+The deprecated `docs(Item... docs)`, `ignoreLike(Item... docs)`,
+`ignoreLike(String... likeText)`, `addItem(Item... likeItems)` have been removed.
+
+===== GeoDistanceQueryBuilder
+
+Removed the individual setters for the lon() and lat() values; both values should be
+set together using point(lon, lat).
+
+===== GeoDistanceRangeQueryBuilder
+
+Removed the setters for to(Object ...) and from(Object ...) in favour of the only two allowed
+input types (String, Number). Removed the setters for the center point (point(), geohash())
+because the parameter is mandatory and should already be set in the constructor.
+Also removed the setters for lt(), lte(), gt(), gte() since they can all be replaced by
+equivalent calls to to()/from() and includeLower()/includeUpper().
+
+===== GeoPolygonQueryBuilder
+
+The shell of the polygon now has to be specified in the constructor instead of being added
+pointwise. This enables validation, but makes it necessary to remove the addPoint() methods.
+
+===== MultiMatchQueryBuilder
+
+Moved the MultiMatchQueryBuilder.ZeroTermsQuery enum to MatchQuery.ZeroTermsQuery.
+Also reusing the new Operator enum.
+
+Removed the ability to pass in a boost value using the `field(String field)` method
+in the form `field^2`. Use the `field(String, float)` method instead.
+
+===== MissingQueryBuilder
+
+The MissingQueryBuilder, which was deprecated in 2.2.0, is removed. As a replacement use ExistsQueryBuilder
+inside a mustNot() clause. So instead of using `new MissingQueryBuilder(name)` now use
+`new BoolQueryBuilder().mustNot(new ExistsQueryBuilder(name))`.
+
+===== NotQueryBuilder
+
+The NotQueryBuilder, which was deprecated in 2.1.0, is removed. As a replacement use BoolQueryBuilder
+with an added mustNot() clause. So instead of using `new NotQueryBuilder(filter)` now use
+`new BoolQueryBuilder().mustNot(filter)`.
+
+===== TermsQueryBuilder
+
+Removed the setter for `termsLookup()`, making it only possible to either use a TermsLookup object or
+individual values at construction time. Also moved the individual settings for the TermsLookup (lookupIndex,
+lookupType, lookupId, lookupPath) to the separate TermsLookup class, which uses a constructor only and now
+performs the validation checks. Removed `TermsLookupQueryBuilder` in favour of `TermsQueryBuilder`.
+
+===== FunctionScoreQueryBuilder
+
+`add` methods have been removed; all filters and functions must be provided as constructor arguments by
+creating an array of `FunctionScoreQueryBuilder.FilterFunctionBuilder` objects, containing one element
+for each filter/function pair.
+
+`scoreMode` and `boostMode` can only be provided using the corresponding enum members instead
+of string values: see `FilterFunctionScoreQuery.ScoreMode` and `CombineFunction`.
+
+`CombineFunction.MULT` has been renamed to `MULTIPLY`.
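+
+As a minimal sketch of the constructor-based API (the query, filter, and
+weight shown are hypothetical examples, not defaults):
+
+[source,java]
+-----
+import org.elasticsearch.common.lucene.search.function.CombineFunction;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder;
+import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder.FilterFunctionBuilder;
+import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders;
+
+// All filter/function pairs are supplied at construction time.
+FunctionScoreQueryBuilder query = new FunctionScoreQueryBuilder(
+        QueryBuilders.matchAllQuery(),
+        new FilterFunctionBuilder[] {
+                new FilterFunctionBuilder(
+                        QueryBuilders.termQuery("status", "active"),
+                        ScoreFunctionBuilders.weightFactorFunction(2.0f))
+        })
+        .boostMode(CombineFunction.MULTIPLY);
+-----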
+ +===== IdsQueryBuilder + +For simplicity, only one way of adding the ids to the existing list (empty by default) is left: `addIds(String...)` + +===== ShapeBuilders + +`InternalLineStringBuilder` is removed in favour of `LineStringBuilder`, `InternalPolygonBuilder` in favour of PolygonBuilder` and `Ring` has been replaced with `LineStringBuilder`. Also the abstract base classes `BaseLineStringBuilder` and `BasePolygonBuilder` haven been merged with their corresponding implementations. + +===== RescoreBuilder + +`RecoreBuilder.Rescorer` was merged with `RescoreBuilder`, which now is an abstract superclass. QueryRescoreBuilder currently is its only implementation. + +===== PhraseSuggestionBuilder + +The inner DirectCandidateGenerator class has been moved out to its own class called DirectCandidateGeneratorBuilder. + +==== SuggestBuilder + +The `setText` method has been changed to `setGlobalText` to make the intent more clear, and a `getGlobalText` method has been added. + +The `addSuggestion` method now required the user specified suggestion name, previously used in the ctor of each suggestion. + +=== SuggestionBuilder + +The `field` setter has been deleted. Instead the field name needs to be specified as constructor argument. diff --git a/docs/reference/migration/migrate_5_0/mapping.asciidoc b/docs/reference/migration/migrate_5_0/mapping.asciidoc new file mode 100644 index 00000000000..768a2438d3e --- /dev/null +++ b/docs/reference/migration/migrate_5_0/mapping.asciidoc @@ -0,0 +1,82 @@ +[[breaking_50_mapping_changes]] +=== Mapping changes + +==== `string` fields replaced by `text`/`keyword` fields + +The `string` field datatype has been replaced by the `text` field for full +text analyzed content, and the `keyword` field for not-analyzed exact string +values. For backwards compatibility purposes, during the 5.x series: + +* `string` fields on pre-5.0 indices will function as before. +* New `string` fields can be added to pre-5.0 indices as before. +* `text` and `keyword` fields can also be added to pre-5.0 indices. +* When adding a `string` field to a new index, the field mapping will be + rewritten as a `text` or `keyword` field if possible, otherwise + an exception will be thrown. Certain configurations that were possible + with `string` fields are no longer possible with `text`/`keyword` fields + such as enabling `term_vectors` on a not-analyzed `keyword` field. + +==== `index` property + +On all field datatypes (except for the deprecated `string` field), the `index` +property now only accepts `true`/`false` instead of `not_analyzed`/`no`. The +`string` field still accepts `analyzed`/`not_analyzed`/`no`. + +==== Doc values on unindexed fields + +Previously, setting a field to `index:no` would also disable doc-values. Now, +doc-values are always enabled on numeric and boolean fields unless +`doc_values` is set to `false`. + +==== Floating points use `float` instead of `double` + +When dynamically mapping a field containing a floating point number, the field +now defaults to using `float` instead of `double`. The reasoning is that +floats should be more than enough for most cases but would decrease storage +requirements significantly. + +==== `fielddata.format` + +Setting `fielddata.format: doc_values` in the mappings used to implicitly +enable doc-values on a field. This no longer works: the only way to enable or +disable doc-values is by using the `doc_values` property of mappings. + +==== Source-transform removed + +The source `transform` feature has been removed. 
+Instead, use an ingest pipeline.
+
+==== `_parent` field no longer indexed
+
+The join between parent and child documents no longer relies on indexed fields
+and therefore from 5.0.0 onwards the `_parent` field is no longer indexed. In
+order to find documents that refer to a specific parent id, the new
+`parent_id` query can be used. The GET response and hits inside the search
+response still include the parent id under the `_parent` key.
+
+==== Source `format` option
+
+The `_source` mapping no longer supports the `format` option. It will still be
+accepted for indices created before the upgrade to 5.0 for backwards
+compatibility, but it will have no effect. Indices created on or after 5.0
+will reject this option.
+
+==== Object notation
+
+Core types no longer support the object notation, which was used to provide
+per document boosts as follows:
+
+[source,json]
+---------------
+{
+  "value": "field_value",
+  "boost": 42
+}
+---------------
+
+==== Boost accuracy for queries on `_all`
+
+Per-field boosts on the `_all` field are now compressed into a single byte instead
+of the 4 bytes used previously. While this will make the index much more
+space-efficient, it also means that index time boosts will be less accurately
+encoded.
+
diff --git a/docs/reference/migration/migrate_5_0/packaging.asciidoc b/docs/reference/migration/migrate_5_0/packaging.asciidoc
new file mode 100644
index 00000000000..9be2d4accac
--- /dev/null
+++ b/docs/reference/migration/migrate_5_0/packaging.asciidoc
@@ -0,0 +1,24 @@
+[[breaking_50_packaging]]
+=== Packaging
+
+==== Default logging using systemd (since Elasticsearch 2.2.0)
+
+In previous versions of Elasticsearch, the default logging
+configuration routed standard output to /dev/null and standard error to
+the journal. However, there are often critical error messages at
+startup that are logged to standard output rather than standard error,
+and these error messages would be lost to the nether. The default has
+changed to now route standard output to the journal and standard error
+to inherit this setting (these are the defaults for systemd). These
+settings can be modified by editing the elasticsearch.service file.
+
+==== Longer startup times
+
+In Elasticsearch 5.0.0 the `-XX:+AlwaysPreTouch` flag has been added to the JVM
+startup options. This option touches all memory pages used by the JVM heap
+during initialization of the HotSpot VM to reduce the chance of having to commit
+a memory page during GC time. This will increase the startup time of
+Elasticsearch as well as increasing the initial resident memory usage of the
+Java process.
+
+
diff --git a/docs/reference/migration/migrate_5_0/percolator.asciidoc b/docs/reference/migration/migrate_5_0/percolator.asciidoc
new file mode 100644
index 00000000000..3c560182c87
--- /dev/null
+++ b/docs/reference/migration/migrate_5_0/percolator.asciidoc
@@ -0,0 +1,41 @@
+[[breaking_50_percolator]]
+=== Percolator changes
+
+==== Percolator is near-real time
+
+Previously, percolators were activated in real-time, i.e. as soon as they were
+indexed. Now, changes to the percolator query are visible in near-real time,
+as soon as the index has been refreshed. This change was required because, in
+indices created from 5.0 onwards, the terms used in a percolator query are
+automatically indexed to allow for more efficient query selection during
+percolation.
+
+==== Percolator mapping
+
+The percolate API can no longer accept documents that reference fields that
+don't already exist in the mapping.
+
+The percolate API no longer modifies the mappings. Previously, the percolate API
+could be used to dynamically introduce new fields to the mappings based on the
+fields in the document being percolated. This no longer works, because these
+unmapped fields are not persisted in the mapping.
+
+==== Percolator documents returned by search
+
+Documents with the `.percolate` type were previously excluded from the search
+response, unless the `.percolate` type was specified explicitly in the search
+request. Now, percolator documents are treated in the same way as any other
+document and are returned by search requests.
+
+==== Percolator `size` default
+
+The percolator by default sets the `size` option to `10` whereas before this
+was unlimited.
+
+==== Percolate API
+
+When percolating an existing document, specifying a document in the source
+of the percolate request is no longer allowed.
+
+
+
diff --git a/docs/reference/migration/migrate_5_0/plugins.asciidoc b/docs/reference/migration/migrate_5_0/plugins.asciidoc
new file mode 100644
index 00000000000..10268887417
--- /dev/null
+++ b/docs/reference/migration/migrate_5_0/plugins.asciidoc
@@ -0,0 +1,99 @@
+[[breaking_50_plugins]]
+=== Plugin changes
+
+The command `bin/plugin` has been renamed to `bin/elasticsearch-plugin`. The
+structure of the plugin ZIP archive has changed. All the plugin files must be
+contained in a top-level directory called `elasticsearch`. If you use the
+gradle build, this structure is automatically generated.
+
+==== Site plugins removed
+
+Site plugins have been removed. Site plugins should be reimplemented as Kibana
+plugins.
+
+==== Multicast plugin removed
+
+Multicast has been removed. Use unicast discovery, or one of the cloud
+discovery plugins.
+
+==== Plugins with custom query implementations
+
+Plugins implementing custom queries need to implement the `fromXContent(QueryParseContext)` method in their
+`QueryParser` subclass rather than `parse`. This method will take care of parsing the query from `XContent` format
+into an intermediate query representation that can be streamed between the nodes in binary format, effectively the
+query object used in the Java API. Also, the query parser needs to implement the `getBuilderPrototype` method that
+returns a prototype of the `NamedWriteable` query, which allows deserializing an incoming query by calling
+`readFrom(StreamInput)` against it, which will create a new object; see usages of `Writeable`. The `QueryParser`
+also needs to declare the generic type of the query that it supports and is able to parse.
+The query object can then transform itself into a Lucene query through the new `toQuery(QueryShardContext)` method,
+which returns a Lucene query to be executed on the data node.
+
+Similarly, plugins implementing custom score functions need to implement the `fromXContent(QueryParseContext)`
+method in their `ScoreFunctionParser` subclass rather than `parse`. This method will take care of parsing
+the function from `XContent` format into an intermediate function representation that can be streamed between
+the nodes in binary format, effectively the function object used in the Java API. Also, the query parser needs
+to implement the `getBuilderPrototype` method that returns a prototype of the `NamedWriteable` function, which
+allows deserializing an incoming function by calling `readFrom(StreamInput)` against it, which will create a
+new object; see usages of `Writeable`.
+The `ScoreFunctionParser` also needs to declare the generic type of the
+function that it supports and is able to parse. The function object can then transform itself into a Lucene
+function through the new `toFunction(QueryShardContext)` method, which returns a Lucene function to be executed
+on the data node.
+
+==== Cloud AWS plugin changes
+
+The Cloud AWS plugin has been split into two plugins:
+
+* {plugins}/discovery-ec2.html[Discovery EC2 plugin]
+* {plugins}/repository-s3.html[Repository S3 plugin]
+
+Proxy settings for both plugins have been renamed:
+
+* from `cloud.aws.proxy_host` to `cloud.aws.proxy.host`
+* from `cloud.aws.ec2.proxy_host` to `cloud.aws.ec2.proxy.host`
+* from `cloud.aws.s3.proxy_host` to `cloud.aws.s3.proxy.host`
+* from `cloud.aws.proxy_port` to `cloud.aws.proxy.port`
+* from `cloud.aws.ec2.proxy_port` to `cloud.aws.ec2.proxy.port`
+* from `cloud.aws.s3.proxy_port` to `cloud.aws.s3.proxy.port`
+
+==== Cloud Azure plugin changes
+
+The Cloud Azure plugin has been split into three plugins:
+
+* {plugins}/discovery-azure.html[Discovery Azure plugin]
+* {plugins}/repository-azure.html[Repository Azure plugin]
+* {plugins}/store-smb.html[Store SMB plugin]
+
+If you were using the `cloud-azure` plugin for snapshot and restore, you had in `elasticsearch.yml`:
+
+[source,yaml]
+-----
+cloud:
+    azure:
+        storage:
+            account: your_azure_storage_account
+            key: your_azure_storage_key
+-----
+
+You now need to give a unique id to the storage details, as multiple storage accounts can be defined:
+
+[source,yaml]
+-----
+cloud:
+    azure:
+        storage:
+            my_account:
+                account: your_azure_storage_account
+                key: your_azure_storage_key
+-----
+
+
+==== Cloud GCE plugin changes
+
+The Cloud GCE plugin has been renamed to the {plugins}/discovery-gce.html[Discovery GCE plugin].
+
+
+==== Mapper Attachments plugin deprecated
+
+The mapper attachments plugin has been deprecated. Users should now use the
+{plugins}/ingest-attachment.html[`ingest-attachment`] plugin.
+
diff --git a/docs/reference/migration/migrate_5_0/rest.asciidoc b/docs/reference/migration/migrate_5_0/rest.asciidoc
new file mode 100644
index 00000000000..590a097f021
--- /dev/null
+++ b/docs/reference/migration/migrate_5_0/rest.asciidoc
@@ -0,0 +1,17 @@
+
+[[breaking_50_rest_api_changes]]
+=== REST API changes
+
+==== id values longer than 512 bytes are rejected
+
+When specifying an `_id` value longer than 512 bytes, the request will be
+rejected.
+
+==== `/_optimize` endpoint removed
+
+The deprecated `/_optimize` endpoint has been removed. The `/_forcemerge`
+endpoint should be used in lieu of optimize.
+
+The `GET` HTTP verb for `/_forcemerge` is no longer supported; please use the
+`POST` HTTP verb.
+
diff --git a/docs/reference/migration/migrate_5_0/search.asciidoc b/docs/reference/migration/migrate_5_0/search.asciidoc
new file mode 100644
index 00000000000..48807bf187a
--- /dev/null
+++ b/docs/reference/migration/migrate_5_0/search.asciidoc
@@ -0,0 +1,141 @@
+[[breaking_50_search_changes]]
+=== Search and Query DSL changes
+
+==== `search_type`
+
+===== `search_type=count` removed
+
+The `count` search type was deprecated in version 2.0.0 and is now removed.
+In order to get the same benefits, you just need to set the value of the `size`
+parameter to `0`.
+
+For instance, the following request:
+
+[source,sh]
+---------------
+GET /my_index/_search?search_type=count
+{
+  "aggs": {
+    "my_terms": {
+      "terms": {
+        "field": "foo"
+      }
+    }
+  }
+}
+---------------
+
+can be replaced with:
+
+[source,sh]
+---------------
+GET /my_index/_search
+{
+  "size": 0,
+  "aggs": {
+    "my_terms": {
+      "terms": {
+        "field": "foo"
+      }
+    }
+  }
+}
+---------------
+
+===== `search_type=scan` removed
+
+The `scan` search type was deprecated in version 2.1.0 and is now removed.
+All benefits from this search type can now be achieved by doing a scroll
+request that sorts documents in `_doc` order, for instance:
+
+[source,sh]
+---------------
+GET /my_index/_search?scroll=2m
+{
+  "sort": [
+    "_doc"
+  ]
+}
+---------------
+
+Scroll requests sorted by `_doc` have been optimized to more efficiently resume
+from where the previous request stopped, so this will have the same performance
+characteristics as the former `scan` search type.
+
+==== `fields` parameter
+
+The `fields` parameter used to try to retrieve field values from stored
+fields, and fall back to extracting from the `_source` if a field is not
+marked as stored. Now, the `fields` parameter will only return stored fields
+-- it will no longer extract values from the `_source`.
+
+==== search-exists API removed
+
+The search exists API has been removed in favour of using the search API with
+`size` set to `0` and `terminate_after` set to `1`.
+
+
+==== Deprecated queries removed
+
+The following deprecated queries have been removed:
+
+`filtered`:: Use `bool` query instead, which supports `filter` clauses too.
+`and`:: Use `must` clauses in a `bool` query instead.
+`or`:: Use `should` clauses in a `bool` query instead.
+`limit`:: Use the `terminate_after` parameter instead.
+`fquery`:: Is obsolete after filters and queries have been merged.
+`query`:: Is obsolete after filters and queries have been merged.
+`query_binary`:: Was undocumented and has been removed.
+`filter_binary`:: Was undocumented and has been removed.
+
+
+==== Changes to queries
+
+* Removed support for the deprecated `min_similarity` parameter in the `fuzzy`
+  query, in favour of `fuzziness`.
+
+* Removed support for the deprecated `fuzzy_min_sim` parameter in the
+  `query_string` query, in favour of `fuzziness`.
+
+* Removed support for the deprecated `edit_distance` parameter in the completion
+  suggester, in favour of `fuzziness`.
+
+* Removed support for the deprecated `filter` and `no_match_filter` fields in `indices` query,
+  in favour of `query` and `no_match_query`.
+
+* Removed support for the deprecated `filter` field in `nested` query, in favour of `query`.
+
+* Removed support for the deprecated `minimum_should_match` and
+  `disable_coord` parameters in `terms` query; use `bool` query instead. Also removed
+  support for the deprecated `execution` parameter.
+
+* Removed support for the top level `filter` element in `function_score` query, replaced by `query`.
+
+* The `collect_payloads` parameter of the `span_near` query has been deprecated. Payloads will be loaded when needed.
+
+* The `score_type` parameter to the `has_child` and `has_parent` queries has been removed in favour of `score_mode`.
+  Also, the `sum` score mode has been removed in favour of the `total` mode.
+
+* When the `max_children` parameter was set to `0` on the `has_child` query,
+  there was no upper limit on how many child documents were allowed to
+  match. Now, `0` really means that zero child documents are allowed.
+  If no upper limit is needed, the `max_children` parameter shouldn't be
+  specified at all.
+
+
+==== Top level `filter` parameter
+
+Removed support for the deprecated top level `filter` in the search API,
+replaced by `post_filter`.
+
+==== Highlighters
+
+Removed support for multiple highlighter names; the only supported ones are
+`plain`, `fvh` and `postings`.
+
+==== Term vectors API
+
+The term vectors APIs no longer persist unmapped fields in the mappings.
+
+The `dfs` parameter to the term vectors API has been removed completely. Term
+vectors don't support distributed document frequencies anymore.
diff --git a/docs/reference/migration/migrate_5_0/settings.asciidoc b/docs/reference/migration/migrate_5_0/settings.asciidoc
new file mode 100644
index 00000000000..002d6cf05df
--- /dev/null
+++ b/docs/reference/migration/migrate_5_0/settings.asciidoc
@@ -0,0 +1,174 @@
+[[breaking_50_settings_changes]]
+=== Settings changes
+
+From Elasticsearch 5.0 onwards, all settings are validated before they are applied.
+Node level and default index level settings are validated on node startup;
+dynamic cluster and index settings are validated before they are updated/added
+to the cluster state.
+
+Every setting must be a *known* setting. All settings must have been
+registered with the node or transport client they are used with. This implies
+that plugins that define custom settings must register all of their settings
+during plugin loading using the `SettingsModule#registerSettings(Setting)`
+method.
+
+==== Node settings
+
+The `name` setting has been removed and is replaced by `node.name`. Usage of
+`-Dname=some_node_name` is not supported anymore.
+
+==== Transport Settings
+
+All settings with a `netty` infix have been replaced by their already existing
+`transport` synonyms. For instance `transport.netty.bind_host` is no longer
+supported and should be replaced by the superseding setting
+`transport.bind_host`.
+
+==== Script mode settings
+
+Previously script mode settings (e.g., "script.inline: true",
+"script.engine.groovy.inline.aggs: false", etc.) accepted the values
+`on`, `true`, `1`, and `yes` for enabling a scripting mode, and the
+values `off`, `false`, `0`, and `no` for disabling a scripting mode.
+The variants `on`, `1`, and `yes` for enabling and `off`, `0`,
+and `no` for disabling are no longer supported.
+
+
+==== Security manager settings
+
+The option to disable the security manager, `security.manager.enabled`, has been
+removed. In order to grant special permissions to Elasticsearch, users must
+edit the local Java Security Policy.
+
+==== Network settings
+
+The `_non_loopback_` value for settings like `network.host` would arbitrarily
+pick the first interface not marked as loopback. Instead, specify by address
+scope (e.g. `_local_,_site_` for all loopback and private network addresses)
+or by explicit interface names, hostnames, or addresses.
+
+==== Forbid changing of thread pool types
+
+Previously, <<modules-threadpool,thread pool types>> could be dynamically
+adjusted. The thread pool type effectively controls the backing queue for the
+thread pool and modifying this is an expert setting with minimal practical
+benefits and high risk of being misused. The ability to change the thread pool
+type for any thread pool has been removed. It is still possible to adjust
+relevant thread pool parameters for each of the thread pools (e.g., depending
+on the thread pool type, `keep_alive`, `queue_size`, etc.), as sketched below.
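+
+For illustration, a minimal sketch of adjusting a thread pool parameter rather
+than its type (the exact `thread_pool.search.queue_size` key and its value are
+assumptions for this example, not taken from this page):
+
+[source,java]
+---------------
+import org.elasticsearch.common.settings.Settings;
+
+// Tuning a queue size is still allowed; changing the pool's type is not.
+Settings nodeSettings = Settings.builder()
+        .put("thread_pool.search.queue_size", 2000)
+        .build();
+---------------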
+
+
+==== Analysis settings
+
+The `index.analysis.analyzer.default_index` analyzer is not supported anymore.
+If you wish to change the analyzer to use for indexing, change the
+`index.analysis.analyzer.default` analyzer instead.
+
+==== Ping timeout settings
+
+Previously, there were three settings for the ping timeout:
+`discovery.zen.initial_ping_timeout`, `discovery.zen.ping.timeout` and
+`discovery.zen.ping_timeout`. The former two have been removed and the only
+setting key for the ping timeout is now `discovery.zen.ping_timeout`. The
+default value for ping timeouts remains at three seconds.
+
+==== Recovery settings
+
+Recovery settings deprecated in 1.x have been removed:
+
+ * `index.shard.recovery.translog_size` is superseded by `indices.recovery.translog_size`
+ * `index.shard.recovery.translog_ops` is superseded by `indices.recovery.translog_ops`
+ * `index.shard.recovery.file_chunk_size` is superseded by `indices.recovery.file_chunk_size`
+ * `index.shard.recovery.concurrent_streams` is superseded by `indices.recovery.concurrent_streams`
+ * `index.shard.recovery.concurrent_small_file_streams` is superseded by `indices.recovery.concurrent_small_file_streams`
+ * `indices.recovery.max_size_per_sec` is superseded by `indices.recovery.max_bytes_per_sec`
+
+If you are using any of these settings please take the time to review their
+purpose. All of the settings above are considered _expert settings_ and should
+only be used if absolutely necessary. If you have set any of the above settings
+as persistent cluster settings please use the settings update API and set
+their superseded keys accordingly.
+
+The following settings have been removed without replacement:
+
+ * `indices.recovery.concurrent_small_file_streams` - recoveries are now single threaded. The number of concurrent outgoing recoveries is throttled via allocation deciders
+ * `indices.recovery.concurrent_file_streams` - recoveries are now single threaded. The number of concurrent outgoing recoveries is throttled via allocation deciders
+
+==== Translog settings
+
+The `index.translog.flush_threshold_ops` setting is not supported anymore. In
+order to control flushes based on the transaction log growth, use
+`index.translog.flush_threshold_size` instead.
+
+Changing the translog type with `index.translog.fs.type` is not supported
+anymore; the `buffered` implementation is now the only available option and
+uses a fixed `8kb` buffer.
+
+The translog by default is fsynced after every `index`, `create`, `update`,
+`delete`, or `bulk` request. The ability to fsync on every operation is not
+necessary anymore. In fact, it can be a performance bottleneck and it is trappy
+since it is enabled by a special value set on `index.translog.sync_interval`.
+Now, `index.translog.sync_interval` doesn't accept a value less than `100ms`,
+which prevents fsyncing too often if async durability is enabled. The special
+value `0` is no longer supported.
+
+==== Request Cache Settings
+
+The deprecated settings `index.cache.query.enable` and
+`indices.cache.query.size` have been removed and are replaced with
+`index.requests.cache.enable` and `indices.requests.cache.size` respectively.
+
+`indices.requests.cache.clean_interval` has been replaced with
+`indices.cache.clean_interval` and is no longer supported.
+
+==== Field Data Cache Settings
+
+The `indices.fielddata.cache.clean_interval` setting has been replaced with
+`indices.cache.clean_interval`.
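+
+For illustration, a minimal sketch of the superseding keys from the translog
+and request cache sections above, set through the Java settings API (the
+values are hypothetical):
+
+[source,java]
+---------------
+import org.elasticsearch.common.settings.Settings;
+
+Settings indexSettings = Settings.builder()
+        // replaces the removed index.translog.flush_threshold_ops
+        .put("index.translog.flush_threshold_size", "512mb")
+        // replaces the removed index.cache.query.enable
+        .put("index.requests.cache.enable", true)
+        .build();
+---------------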
+
+==== Allocation settings
+
+The `cluster.routing.allocation.concurrent_recoveries` setting has been
+replaced with `cluster.routing.allocation.node_concurrent_recoveries`.
+
+==== Similarity settings
+
+The 'default' similarity has been renamed to 'classic'.
+
+==== Indexing settings
+
+The `indices.memory.min_shard_index_buffer_size` and
+`indices.memory.max_shard_index_buffer_size` settings have been removed as
+Elasticsearch now allows any one shard to use any amount of heap as long as the
+total indexing buffer heap used across all shards is below the node's
+`indices.memory.index_buffer_size` (defaults to 10% of the JVM heap).
+
+==== Removed es.max-open-files
+
+Setting the system property `es.max-open-files` to `true` to get
+Elasticsearch to print the number of maximum open files for the
+Elasticsearch process has been removed. This same information can be
+obtained from the <<cluster-nodes-info,nodes info>> API, and a warning is logged
+on startup if it is set too low.
+
+==== Removed es.netty.gathering
+
+Disabling Netty from using NIO gathering could be done via the escape
+hatch of setting the system property `es.netty.gathering` to `false`.
+Time has proven enabling gathering by default is a non-issue and this
+undocumented setting has been removed.
+
+==== Removed es.useLinkedTransferQueue
+
+The system property `es.useLinkedTransferQueue` could be used to
+control the queue implementation used in the cluster service and the
+handling of ping responses during discovery. This was an undocumented
+setting and has been removed.
+
+==== Cache concurrency level settings removed
+
+The two cache concurrency level settings
+`indices.requests.cache.concurrency_level` and
+`indices.fielddata.cache.concurrency_level` have been removed because they no
+longer apply to the cache implementation used for the request cache and the
+field data cache.
+
diff --git a/docs/reference/query-dsl/regexp-syntax.asciidoc b/docs/reference/query-dsl/regexp-syntax.asciidoc
index e57d0e1c779..68ca5912458 100644
--- a/docs/reference/query-dsl/regexp-syntax.asciidoc
+++ b/docs/reference/query-dsl/regexp-syntax.asciidoc
@@ -220,12 +220,20 @@ Complement::
 --
 The complement is probably the most useful option. The shortest pattern that
-follows a tilde `"~"` is negated. For the string `"abcdef"`:
+follows a tilde `"~"` is negated. For instance, `"ab~cd"` means:
+
+* Starts with `a`
+* Followed by `b`
+* Followed by a string of any length that is anything but `c`
+* Ends with `d`
+
+For the string `"abcdef"`:
 
     ab~df     # match
-    ab~cf     # no match
-    a~(cd)f   # match
-    a~(bc)f   # no match
+    ab~cf     # match
+    ab~cdef   # no match
+    a~(cb)def # match
+    a~(bc)def # no match
 
 Enabled with the `COMPLEMENT` or `ALL` flags.
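+
+For illustration, a minimal sketch of the complement example above through the
+Java API (the `body` field name is hypothetical; the `QueryBuilders` and
+`RegexpFlag` classes from `org.elasticsearch.index.query` are assumptions):
+
+[source,java]
+---------------
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.RegexpFlag;
+import org.elasticsearch.index.query.RegexpQueryBuilder;
+
+// Starts with "a", then "b", then anything but "c", and ends with "d".
+RegexpQueryBuilder query = QueryBuilders.regexpQuery("body", "ab~cd")
+        .flags(RegexpFlag.COMPLEMENT);
+---------------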
diff --git a/modules/lang-expression/licenses/lucene-expressions-6.0.0-snapshot-bea235f.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-6.0.0-snapshot-bea235f.jar.sha1 deleted file mode 100644 index d9a29f17c50..00000000000 --- a/modules/lang-expression/licenses/lucene-expressions-6.0.0-snapshot-bea235f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8d11bf581b0afc25f87a57c06834cd85930d2ffa \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-6.0.0-snapshot-f0aa4fc.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..5237907f224 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +f36f8010c9fec7342d34bece819c13de5f241135 \ No newline at end of file diff --git a/modules/lang-groovy/build.gradle b/modules/lang-groovy/build.gradle index 89444a4e926..2160210ba73 100644 --- a/modules/lang-groovy/build.gradle +++ b/modules/lang-groovy/build.gradle @@ -38,6 +38,17 @@ thirdPartyAudit.excludes = [ // for example we do not need ivy, scripts arent allowed to download code 'com.thoughtworks.xstream.XStream', 'groovyjarjarasm.asm.util.Textifiable', + // commons-cli is referenced by groovy, even though they supposedly + // jarjar it. Since we don't use the cli, we don't need the dep. + 'org.apache.commons.cli.CommandLine', + 'org.apache.commons.cli.CommandLineParser', + 'org.apache.commons.cli.GnuParser', + 'org.apache.commons.cli.HelpFormatter', + 'org.apache.commons.cli.Option', + 'org.apache.commons.cli.OptionBuilder', + 'org.apache.commons.cli.Options', + 'org.apache.commons.cli.Parser', + 'org.apache.commons.cli.PosixParser', 'org.apache.ivy.Ivy', 'org.apache.ivy.core.event.IvyListener', 'org.apache.ivy.core.event.download.PrepareDownloadEvent', diff --git a/modules/lang-groovy/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java b/modules/lang-groovy/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java index 4d9e7a4b57b..60a8a0c1338 100644 --- a/modules/lang-groovy/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java +++ b/modules/lang-groovy/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java @@ -316,7 +316,7 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri }); } catch (Throwable e) { if (logger.isTraceEnabled()) { - logger.trace("failed to run " + compiledScript, e); + logger.trace("failed to run {}", e, compiledScript); } throw new ScriptException("failed to run " + compiledScript, e); } diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/EquivalenceTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/EquivalenceTests.java index 2fb0f9f6327..6f83746d4ce 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/EquivalenceTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/EquivalenceTests.java @@ -332,7 +332,7 @@ public class EquivalenceTests extends ESIntegTestCase { createIndex("idx"); final int numDocs = scaledRandomIntBetween(2500, 5000); - logger.info("Indexing [" + numDocs +"] docs"); + logger.info("Indexing [{}] docs", numDocs); List indexingRequests = new ArrayList<>(); for (int i = 0; i < numDocs; ++i) { indexingRequests.add(client().prepareIndex("idx", "type", Integer.toString(i)).setSource("double_value", randomDouble())); diff --git 
a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/ExtendedStatsTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/ExtendedStatsTests.java index e717ea6d6fb..4642d4662c9 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/ExtendedStatsTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/ExtendedStatsTests.java @@ -543,7 +543,7 @@ public class ExtendedStatsTests extends AbstractNumericTestCase { ShardSearchFailure[] failures = response.getShardFailures(); if (failures.length != expectedFailures) { for (ShardSearchFailure failure : failures) { - logger.error("Shard Failure: {}", failure.reason(), failure.toString()); + logger.error("Shard Failure: {}", failure.getCause(), failure); } fail("Unexpected shard failures!"); } diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SearchStatsTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SearchStatsTests.java index 72abe487d89..7838bb58f8e 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SearchStatsTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SearchStatsTests.java @@ -116,7 +116,7 @@ public class SearchStatsTests extends ESIntegTestCase { } IndicesStatsResponse indicesStats = client().admin().indices().prepareStats().execute().actionGet(); - logger.debug("###### indices search stats: " + indicesStats.getTotal().getSearch()); + logger.debug("###### indices search stats: {}", indicesStats.getTotal().getSearch()); assertThat(indicesStats.getTotal().getSearch().getTotal().getQueryCount(), greaterThan(0L)); assertThat(indicesStats.getTotal().getSearch().getTotal().getQueryTimeInMillis(), greaterThan(0L)); assertThat(indicesStats.getTotal().getSearch().getTotal().getFetchCount(), greaterThan(0L)); diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/StatsTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/StatsTests.java index 4934d2ae6c4..b06d3395b2b 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/StatsTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/StatsTests.java @@ -404,10 +404,10 @@ public class StatsTests extends AbstractNumericTestCase { ShardSearchFailure[] failures = response.getShardFailures(); if (failures.length != expectedFailures) { for (ShardSearchFailure failure : failures) { - logger.error("Shard Failure: {}", failure.reason(), failure.toString()); + logger.error("Shard Failure: {}", failure.getCause(), failure); } fail("Unexpected shard failures!"); } assertThat("Not all shards are initialized", response.getSuccessfulShards(), equalTo(response.getTotalShards())); } -} \ No newline at end of file +} diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/StringTermsTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/StringTermsTests.java index edf8be49ddc..346d19d4ce5 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/StringTermsTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/StringTermsTests.java @@ -243,7 +243,7 @@ public class StringTermsTests extends AbstractTermsTestCase { ExecutionMode[] executionModes = new ExecutionMode[] { null, ExecutionMode.GLOBAL_ORDINALS, ExecutionMode.GLOBAL_ORDINALS_HASH, ExecutionMode.GLOBAL_ORDINALS_LOW_CARDINALITY }; for (ExecutionMode executionMode : executionModes) { - 
logger.info("Execution mode:" + executionMode); + logger.info("Execution mode: {}", executionMode); SearchResponse response = client() .prepareSearch("idx") .setTypes("type") diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java index 766c5bff9c4..647a727b2dd 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java @@ -191,7 +191,7 @@ public final class MustacheScriptEngineService extends AbstractComponent impleme } }); } catch (Exception e) { - logger.error("Error running " + template, e); + logger.error("Error running {}", e, template); throw new ScriptException("Error running " + template, e); } return result.bytes(); diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.0.0-snapshot-bea235f.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.0.0-snapshot-bea235f.jar.sha1 deleted file mode 100644 index 538d2ad8216..00000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.0.0-snapshot-bea235f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -38fda9b86e4f68eb6c9d31fb636a2540da219927 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.0.0-snapshot-f0aa4fc.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..f2e307d5d98 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +1378905632ff45a9887b267c4b30f7adef415ca4 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.0.0-snapshot-bea235f.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.0.0-snapshot-bea235f.jar.sha1 deleted file mode 100644 index b90115da4ab..00000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.0.0-snapshot-bea235f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -352fea7a169ada6a7ae18e4ec34559496e09b465 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.0.0-snapshot-f0aa4fc.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..7bf3eb5333d --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +49acd38e206d9c2fe28269fcba9b752d3b605e0e \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.0.0-snapshot-bea235f.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.0.0-snapshot-bea235f.jar.sha1 deleted file mode 100644 index 7cbe648e0bd..00000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.0.0-snapshot-bea235f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -445f5ea7822d0dd6b91364ec119cd6cb4635d285 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.0.0-snapshot-f0aa4fc.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..8f08fe26980 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ 
+7c11723d7d4dc3b1c9bf80089cfc2de7bc8a2b6e \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.0.0-snapshot-bea235f.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.0.0-snapshot-bea235f.jar.sha1 deleted file mode 100644 index 03c96786de2..00000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.0.0-snapshot-bea235f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0b216b7b9ff583bc1382edc8adfee4d4acd02859 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.0.0-snapshot-f0aa4fc.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..bf5e5da8dcf --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +654d961bd4975a3cb13388d86d72fefb6994f659 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.0.0-snapshot-bea235f.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.0.0-snapshot-bea235f.jar.sha1 deleted file mode 100644 index f27a98f63ba..00000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.0.0-snapshot-bea235f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8d161a8c7e5b5b82f64dc5df2ca46197a3716672 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.0.0-snapshot-f0aa4fc.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..ed0dc51b97c --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +0f408ac498782617a0f80d6a295d82f6d3609499 \ No newline at end of file diff --git a/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeService.java b/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeService.java index 0c665c138b8..acc1e76bde4 100644 --- a/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeService.java +++ b/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeService.java @@ -22,26 +22,36 @@ package org.elasticsearch.cloud.azure.management; import com.microsoft.windowsazure.core.utils.KeyStoreType; import com.microsoft.windowsazure.management.compute.models.HostedServiceGetDetailedResponse; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.azure.AzureUnicastHostsProvider; public interface AzureComputeService { final class Management { - public static final Setting SUBSCRIPTION_ID_SETTING = Setting.simpleString("cloud.azure.management.subscription.id", false, Setting.Scope.CLUSTER); - public static final Setting SERVICE_NAME_SETTING = Setting.simpleString("cloud.azure.management.cloud.service.name", false, Setting.Scope.CLUSTER); + public static final Setting SUBSCRIPTION_ID_SETTING = + Setting.simpleString("cloud.azure.management.subscription.id", Property.NodeScope, Property.Filtered); + public static final Setting SERVICE_NAME_SETTING = + Setting.simpleString("cloud.azure.management.cloud.service.name", Property.NodeScope); // Keystore settings - public static final Setting KEYSTORE_PATH_SETTING = 
Setting.simpleString("cloud.azure.management.keystore.path", false, Setting.Scope.CLUSTER); - public static final Setting KEYSTORE_PASSWORD_SETTING = Setting.simpleString("cloud.azure.management.keystore.password", false, Setting.Scope.CLUSTER); - public static final Setting KEYSTORE_TYPE_SETTING = new Setting<>("cloud.azure.management.keystore.type", KeyStoreType.pkcs12.name(), KeyStoreType::fromString, false, Setting.Scope.CLUSTER); + public static final Setting KEYSTORE_PATH_SETTING = + Setting.simpleString("cloud.azure.management.keystore.path", Property.NodeScope, Property.Filtered); + public static final Setting KEYSTORE_PASSWORD_SETTING = + Setting.simpleString("cloud.azure.management.keystore.password", Property.NodeScope, + Property.Filtered); + public static final Setting KEYSTORE_TYPE_SETTING = + new Setting<>("cloud.azure.management.keystore.type", KeyStoreType.pkcs12.name(), KeyStoreType::fromString, + Property.NodeScope, Property.Filtered); } final class Discovery { - public static final Setting REFRESH_SETTING = Setting.positiveTimeSetting("discovery.azure.refresh_interval", TimeValue.timeValueSeconds(0), false, Setting.Scope.CLUSTER); + public static final Setting REFRESH_SETTING = + Setting.positiveTimeSetting("discovery.azure.refresh_interval", TimeValue.timeValueSeconds(0), Property.NodeScope); - public static final Setting HOST_TYPE_SETTING = new Setting<>("discovery.azure.host.type", - AzureUnicastHostsProvider.HostType.PRIVATE_IP.name(), AzureUnicastHostsProvider.HostType::fromString, false, Setting.Scope.CLUSTER); + public static final Setting HOST_TYPE_SETTING = + new Setting<>("discovery.azure.host.type", AzureUnicastHostsProvider.HostType.PRIVATE_IP.name(), + AzureUnicastHostsProvider.HostType::fromString, Property.NodeScope); public static final String ENDPOINT_NAME = "discovery.azure.endpoint.name"; public static final String DEPLOYMENT_NAME = "discovery.azure.deployment.name"; diff --git a/plugins/discovery-azure/src/main/java/org/elasticsearch/plugin/discovery/azure/AzureDiscoveryPlugin.java b/plugins/discovery-azure/src/main/java/org/elasticsearch/plugin/discovery/azure/AzureDiscoveryPlugin.java index b2b94b6c3bb..e5de7a0cb02 100644 --- a/plugins/discovery-azure/src/main/java/org/elasticsearch/plugin/discovery/azure/AzureDiscoveryPlugin.java +++ b/plugins/discovery-azure/src/main/java/org/elasticsearch/plugin/discovery/azure/AzureDiscoveryPlugin.java @@ -75,10 +75,5 @@ public class AzureDiscoveryPlugin extends Plugin { settingsModule.registerSetting(AzureComputeService.Management.SUBSCRIPTION_ID_SETTING); settingsModule.registerSetting(AzureComputeService.Management.SERVICE_NAME_SETTING); settingsModule.registerSetting(AzureComputeService.Discovery.HOST_TYPE_SETTING); - // Cloud management API settings we need to hide - settingsModule.registerSettingsFilter(AzureComputeService.Management.KEYSTORE_PATH_SETTING.getKey()); - settingsModule.registerSettingsFilter(AzureComputeService.Management.KEYSTORE_PASSWORD_SETTING.getKey()); - settingsModule.registerSettingsFilter(AzureComputeService.Management.KEYSTORE_TYPE_SETTING.getKey()); - settingsModule.registerSettingsFilter(AzureComputeService.Management.SUBSCRIPTION_ID_SETTING.getKey()); } } diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2Service.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2Service.java index a90d3573468..8cfe6c43108 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2Service.java +++ 
b/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2Service.java @@ -22,6 +22,7 @@ package org.elasticsearch.cloud.aws; import com.amazonaws.Protocol; import com.amazonaws.services.ec2.AmazonEC2; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -32,7 +33,7 @@ import java.util.Locale; import java.util.function.Function; public interface AwsEc2Service { - Setting AUTO_ATTRIBUTE_SETTING = Setting.boolSetting("cloud.node.auto_attributes", false, false, Setting.Scope.CLUSTER); + Setting AUTO_ATTRIBUTE_SETTING = Setting.boolSetting("cloud.node.auto_attributes", false, Property.NodeScope); // Global AWS settings (shared between discovery-ec2 and repository-s3) // Each setting starting with `cloud.aws` also exists in repository-s3 project. Don't forget to update @@ -40,40 +41,44 @@ public interface AwsEc2Service { /** * cloud.aws.access_key: AWS Access key. Shared with repository-s3 plugin */ - Setting KEY_SETTING = Setting.simpleString("cloud.aws.access_key", false, Setting.Scope.CLUSTER); + Setting KEY_SETTING = + Setting.simpleString("cloud.aws.access_key", Property.NodeScope, Property.Filtered); /** * cloud.aws.secret_key: AWS Secret key. Shared with repository-s3 plugin */ - Setting SECRET_SETTING = Setting.simpleString("cloud.aws.secret_key", false, Setting.Scope.CLUSTER); + Setting SECRET_SETTING = + Setting.simpleString("cloud.aws.secret_key", Property.NodeScope, Property.Filtered); /** * cloud.aws.protocol: Protocol for AWS API: http or https. Defaults to https. Shared with repository-s3 plugin */ Setting PROTOCOL_SETTING = new Setting<>("cloud.aws.protocol", "https", s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), - false, Setting.Scope.CLUSTER); + Property.NodeScope); /** * cloud.aws.proxy.host: In case of proxy, define its hostname/IP. Shared with repository-s3 plugin */ - Setting PROXY_HOST_SETTING = Setting.simpleString("cloud.aws.proxy.host", false, Setting.Scope.CLUSTER); + Setting PROXY_HOST_SETTING = Setting.simpleString("cloud.aws.proxy.host", Property.NodeScope); /** * cloud.aws.proxy.port: In case of proxy, define its port. Defaults to 80. Shared with repository-s3 plugin */ - Setting PROXY_PORT_SETTING = Setting.intSetting("cloud.aws.proxy.port", 80, 0, 1<<16, false, Setting.Scope.CLUSTER); + Setting PROXY_PORT_SETTING = Setting.intSetting("cloud.aws.proxy.port", 80, 0, 1<<16, Property.NodeScope); /** * cloud.aws.proxy.username: In case of proxy with auth, define the username. Shared with repository-s3 plugin */ - Setting PROXY_USERNAME_SETTING = Setting.simpleString("cloud.aws.proxy.username", false, Setting.Scope.CLUSTER); + Setting PROXY_USERNAME_SETTING = Setting.simpleString("cloud.aws.proxy.username", Property.NodeScope); /** * cloud.aws.proxy.password: In case of proxy with auth, define the password. Shared with repository-s3 plugin */ - Setting PROXY_PASSWORD_SETTING = Setting.simpleString("cloud.aws.proxy.password", false, Setting.Scope.CLUSTER); + Setting PROXY_PASSWORD_SETTING = + Setting.simpleString("cloud.aws.proxy.password", Property.NodeScope, Property.Filtered); /** * cloud.aws.signer: If you are using an old AWS API version, you can define a Signer. 
Shared with repository-s3 plugin */ - Setting SIGNER_SETTING = Setting.simpleString("cloud.aws.signer", false, Setting.Scope.CLUSTER); + Setting SIGNER_SETTING = Setting.simpleString("cloud.aws.signer", Property.NodeScope); /** * cloud.aws.region: Region. Shared with repository-s3 plugin */ - Setting REGION_SETTING = new Setting<>("cloud.aws.region", "", s -> s.toLowerCase(Locale.ROOT), false, Setting.Scope.CLUSTER); + Setting REGION_SETTING = + new Setting<>("cloud.aws.region", "", s -> s.toLowerCase(Locale.ROOT), Property.NodeScope); /** * Defines specific ec2 settings starting with cloud.aws.ec2. @@ -83,63 +88,63 @@ public interface AwsEc2Service { * cloud.aws.ec2.access_key: AWS Access key specific for EC2 API calls. Defaults to cloud.aws.access_key. * @see AwsEc2Service#KEY_SETTING */ - Setting KEY_SETTING = new Setting<>("cloud.aws.ec2.access_key", AwsEc2Service.KEY_SETTING, Function.identity(), false, - Setting.Scope.CLUSTER); + Setting KEY_SETTING = new Setting<>("cloud.aws.ec2.access_key", AwsEc2Service.KEY_SETTING, Function.identity(), + Property.NodeScope, Property.Filtered); /** * cloud.aws.ec2.secret_key: AWS Secret key specific for EC2 API calls. Defaults to cloud.aws.secret_key. * @see AwsEc2Service#SECRET_SETTING */ - Setting SECRET_SETTING = new Setting<>("cloud.aws.ec2.secret_key", AwsEc2Service.SECRET_SETTING, Function.identity(), false, - Setting.Scope.CLUSTER); + Setting SECRET_SETTING = new Setting<>("cloud.aws.ec2.secret_key", AwsEc2Service.SECRET_SETTING, Function.identity(), + Property.NodeScope, Property.Filtered); /** * cloud.aws.ec2.protocol: Protocol for AWS API specific for EC2 API calls: http or https. Defaults to cloud.aws.protocol. * @see AwsEc2Service#PROTOCOL_SETTING */ Setting PROTOCOL_SETTING = new Setting<>("cloud.aws.ec2.protocol", AwsEc2Service.PROTOCOL_SETTING, - s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), false, Setting.Scope.CLUSTER); + s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), Property.NodeScope); /** * cloud.aws.ec2.proxy.host: In case of proxy, define its hostname/IP specific for EC2 API calls. Defaults to cloud.aws.proxy.host. * @see AwsEc2Service#PROXY_HOST_SETTING */ Setting PROXY_HOST_SETTING = new Setting<>("cloud.aws.ec2.proxy.host", AwsEc2Service.PROXY_HOST_SETTING, - Function.identity(), false, Setting.Scope.CLUSTER); + Function.identity(), Property.NodeScope); /** * cloud.aws.ec2.proxy.port: In case of proxy, define its port specific for EC2 API calls. Defaults to cloud.aws.proxy.port. * @see AwsEc2Service#PROXY_PORT_SETTING */ Setting PROXY_PORT_SETTING = new Setting<>("cloud.aws.ec2.proxy.port", AwsEc2Service.PROXY_PORT_SETTING, - s -> Setting.parseInt(s, 0, 1<<16, "cloud.aws.ec2.proxy.port"), false, Setting.Scope.CLUSTER); + s -> Setting.parseInt(s, 0, 1<<16, "cloud.aws.ec2.proxy.port"), Property.NodeScope); /** * cloud.aws.ec2.proxy.username: In case of proxy with auth, define the username specific for EC2 API calls. * Defaults to cloud.aws.proxy.username. * @see AwsEc2Service#PROXY_USERNAME_SETTING */ Setting PROXY_USERNAME_SETTING = new Setting<>("cloud.aws.ec2.proxy.username", AwsEc2Service.PROXY_USERNAME_SETTING, - Function.identity(), false, Setting.Scope.CLUSTER); + Function.identity(), Property.NodeScope); /** * cloud.aws.ec2.proxy.password: In case of proxy with auth, define the password specific for EC2 API calls. * Defaults to cloud.aws.proxy.password. 
* @see AwsEc2Service#PROXY_PASSWORD_SETTING */ Setting PROXY_PASSWORD_SETTING = new Setting<>("cloud.aws.ec2.proxy.password", AwsEc2Service.PROXY_PASSWORD_SETTING, - Function.identity(), false, Setting.Scope.CLUSTER); + Function.identity(), Property.NodeScope, Property.Filtered); /** * cloud.aws.ec2.signer: If you are using an old AWS API version, you can define a Signer. Specific for EC2 API calls. * Defaults to cloud.aws.signer. * @see AwsEc2Service#SIGNER_SETTING */ Setting SIGNER_SETTING = new Setting<>("cloud.aws.ec2.signer", AwsEc2Service.SIGNER_SETTING, Function.identity(), - false, Setting.Scope.CLUSTER); + Property.NodeScope); /** * cloud.aws.ec2.region: Region specific for EC2 API calls. Defaults to cloud.aws.region. * @see AwsEc2Service#REGION_SETTING */ Setting REGION_SETTING = new Setting<>("cloud.aws.ec2.region", AwsEc2Service.REGION_SETTING, - s -> s.toLowerCase(Locale.ROOT), false, Setting.Scope.CLUSTER); + s -> s.toLowerCase(Locale.ROOT), Property.NodeScope); /** * cloud.aws.ec2.endpoint: Endpoint. If not set, endpoint will be guessed based on region setting. */ - Setting ENDPOINT_SETTING = Setting.simpleString("cloud.aws.ec2.endpoint", false, Setting.Scope.CLUSTER); + Setting ENDPOINT_SETTING = Setting.simpleString("cloud.aws.ec2.endpoint", Property.NodeScope); } /** @@ -158,32 +163,32 @@ public interface AwsEc2Service { * Can be one of private_ip, public_ip, private_dns, public_dns. Defaults to private_ip. */ Setting HOST_TYPE_SETTING = - new Setting<>("discovery.ec2.host_type", HostType.PRIVATE_IP.name(), s -> HostType.valueOf(s.toUpperCase(Locale.ROOT)), false, - Setting.Scope.CLUSTER); + new Setting<>("discovery.ec2.host_type", HostType.PRIVATE_IP.name(), s -> HostType.valueOf(s.toUpperCase(Locale.ROOT)), + Property.NodeScope); /** * discovery.ec2.any_group: If set to false, will require all security groups to be present for the instance to be used for the * discovery. Defaults to true. */ Setting ANY_GROUP_SETTING = - Setting.boolSetting("discovery.ec2.any_group", true, false, Setting.Scope.CLUSTER); + Setting.boolSetting("discovery.ec2.any_group", true, Property.NodeScope); /** * discovery.ec2.groups: Either a comma separated list or array based list of (security) groups. Only instances with the provided * security groups will be used in the cluster discovery. (NOTE: You could provide either group NAME or group ID.) */ Setting> GROUPS_SETTING = - Setting.listSetting("discovery.ec2.groups", new ArrayList<>(), s -> s.toString(), false, Setting.Scope.CLUSTER); + Setting.listSetting("discovery.ec2.groups", new ArrayList<>(), s -> s.toString(), Property.NodeScope); /** * discovery.ec2.availability_zones: Either a comma separated list or array based list of availability zones. Only instances within * the provided availability zones will be used in the cluster discovery. */ Setting> AVAILABILITY_ZONES_SETTING = - Setting.listSetting("discovery.ec2.availability_zones", Collections.emptyList(), s -> s.toString(), false, - Setting.Scope.CLUSTER); + Setting.listSetting("discovery.ec2.availability_zones", Collections.emptyList(), s -> s.toString(), + Property.NodeScope); /** * discovery.ec2.node_cache_time: How long the list of hosts is cached to prevent further requests to the AWS API. Defaults to 10s. 
*/ Setting NODE_CACHE_TIME_SETTING = - Setting.timeSetting("discovery.ec2.node_cache_time", TimeValue.timeValueSeconds(10), false, Setting.Scope.CLUSTER); + Setting.timeSetting("discovery.ec2.node_cache_time", TimeValue.timeValueSeconds(10), Property.NodeScope); /** * discovery.ec2.tag.*: The ec2 discovery can filter machines to include in the cluster based on tags (and not just groups). @@ -191,7 +196,7 @@ public interface AwsEc2Service { * instances with a tag key set to stage, and a value of dev. Several tags set will require all of those tags to be set for the * instance to be included. */ - Setting TAG_SETTING = Setting.groupSetting("discovery.ec2.tag.", false,Setting.Scope.CLUSTER); + Setting TAG_SETTING = Setting.groupSetting("discovery.ec2.tag.", Property.NodeScope); } AmazonEC2 client(); diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsSigner.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsSigner.java index 36eae9b5829..a76a2b04a91 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsSigner.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsSigner.java @@ -55,7 +55,7 @@ public class AwsSigner { try { validateSignerType(signer); } catch (IllegalArgumentException e) { - logger.warn(e.getMessage()); + logger.warn("{}", e.getMessage()); } configuration.setSignerOverride(signer); diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/plugin/discovery/ec2/Ec2DiscoveryPlugin.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/plugin/discovery/ec2/Ec2DiscoveryPlugin.java index 12d5682d487..b4c293ec736 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/plugin/discovery/ec2/Ec2DiscoveryPlugin.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/plugin/discovery/ec2/Ec2DiscoveryPlugin.java @@ -134,14 +134,6 @@ public class Ec2DiscoveryPlugin extends Plugin { settingsModule.registerSetting(AwsEc2Service.DISCOVERY_EC2.GROUPS_SETTING); settingsModule.registerSetting(AwsEc2Service.DISCOVERY_EC2.AVAILABILITY_ZONES_SETTING); settingsModule.registerSetting(AwsEc2Service.DISCOVERY_EC2.NODE_CACHE_TIME_SETTING); - - // Filter global settings - settingsModule.registerSettingsFilterIfMissing(AwsEc2Service.KEY_SETTING.getKey()); - settingsModule.registerSettingsFilterIfMissing(AwsEc2Service.SECRET_SETTING.getKey()); - settingsModule.registerSettingsFilterIfMissing(AwsEc2Service.PROXY_PASSWORD_SETTING.getKey()); - settingsModule.registerSettingsFilterIfMissing(AwsEc2Service.CLOUD_EC2.KEY_SETTING.getKey()); - settingsModule.registerSettingsFilterIfMissing(AwsEc2Service.CLOUD_EC2.SECRET_SETTING.getKey()); - settingsModule.registerSettingsFilterIfMissing(AwsEc2Service.CLOUD_EC2.PROXY_PASSWORD_SETTING.getKey()); } /** diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java index 5063d59b40e..1705421207b 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java @@ -191,7 +191,7 @@ public class Ec2DiscoveryTests extends ESTestCase { tagsList.add(tags); } - logger.info("started [{}] instances with [{}] stage=prod tag"); + logger.info("started [{}] instances with [{}] stage=prod tag", nodes, prodInstances); List discoveryNodes = buildDynamicNodes(nodeSettings, nodes, tagsList); 
assertThat(discoveryNodes, hasSize(prodInstances)); } @@ -222,7 +222,7 @@ public class Ec2DiscoveryTests extends ESTestCase { tagsList.add(tags); } - logger.info("started [{}] instances with [{}] stage=prod tag"); + logger.info("started [{}] instances with [{}] stage=prod tag", nodes, prodInstances); List discoveryNodes = buildDynamicNodes(nodeSettings, nodes, tagsList); assertThat(discoveryNodes, hasSize(prodInstances)); } diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeService.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeService.java index ce5154b3436..a6faa390e5d 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeService.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeService.java @@ -22,12 +22,14 @@ package org.elasticsearch.cloud.gce; import com.google.api.services.compute.model.Instance; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.TimeValue; import java.io.IOException; import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.function.Function; public interface GceComputeService extends LifecycleComponent { @@ -41,25 +43,25 @@ public interface GceComputeService extends LifecycleComponent /** * cloud.gce.project_id: Google project id */ - Setting PROJECT_SETTING = Setting.simpleString("cloud.gce.project_id", false, Setting.Scope.CLUSTER); + Setting PROJECT_SETTING = Setting.simpleString("cloud.gce.project_id", Property.NodeScope); /** * cloud.gce.zone: Google Compute Engine zones */ Setting> ZONE_SETTING = - Setting.listSetting("cloud.gce.zone", Collections.emptyList(), s -> s, false, Setting.Scope.CLUSTER); + Setting.listSetting("cloud.gce.zone", Collections.emptyList(), Function.identity(), Property.NodeScope); /** * cloud.gce.refresh_interval: How long the list of hosts is cached to prevent further requests to the AWS API. 0 disables caching. * A negative value will cause infinite caching. Defaults to 0s. */ Setting REFRESH_SETTING = - Setting.timeSetting("cloud.gce.refresh_interval", TimeValue.timeValueSeconds(0), false, Setting.Scope.CLUSTER); + Setting.timeSetting("cloud.gce.refresh_interval", TimeValue.timeValueSeconds(0), Property.NodeScope); /** * cloud.gce.retry: Should we retry calling GCE API in case of error? Defaults to true. */ - Setting RETRY_SETTING = Setting.boolSetting("cloud.gce.retry", true, false, Setting.Scope.CLUSTER); + Setting RETRY_SETTING = Setting.boolSetting("cloud.gce.retry", true, Property.NodeScope); /** * cloud.gce.max_wait: How long exponential backoff should retry before definitely failing. @@ -67,7 +69,7 @@ public interface GceComputeService extends LifecycleComponent * A negative value will retry indefinitely. Defaults to `-1s` (retry indefinitely). 
*/ Setting MAX_WAIT_SETTING = - Setting.timeSetting("cloud.gce.max_wait", TimeValue.timeValueSeconds(-1), false, Setting.Scope.CLUSTER); + Setting.timeSetting("cloud.gce.max_wait", TimeValue.timeValueSeconds(-1), Property.NodeScope); /** * Return a collection of running instances within the same GCE project diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeServiceImpl.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeServiceImpl.java index d9033b602d2..85e0910736f 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeServiceImpl.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeServiceImpl.java @@ -38,13 +38,13 @@ import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.gce.RetryHttpInitializerWrapper; import java.io.IOException; import java.net.URL; -import java.nio.file.Files; import java.security.AccessController; import java.security.GeneralSecurityException; import java.security.PrivilegedAction; @@ -61,11 +61,11 @@ public class GceComputeServiceImpl extends AbstractLifecycleComponent GCE_VALIDATE_CERTIFICATES = - Setting.boolSetting("cloud.gce.validate_certificates", true, false, Setting.Scope.CLUSTER); + Setting.boolSetting("cloud.gce.validate_certificates", true, Property.NodeScope); public static final Setting GCE_HOST = - new Setting<>("cloud.gce.host", "http://metadata.google.internal", Function.identity(), false, Setting.Scope.CLUSTER); + new Setting<>("cloud.gce.host", "http://metadata.google.internal", Function.identity(), Property.NodeScope); public static final Setting GCE_ROOT_URL = - new Setting<>("cloud.gce.root_url", "https://www.googleapis.com", Function.identity(), false, Setting.Scope.CLUSTER); + new Setting<>("cloud.gce.root_url", "https://www.googleapis.com", Function.identity(), Property.NodeScope); private final String project; private final List zones; diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java index 4e7956c379e..85f3e3a9585 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; @@ -43,6 +44,7 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.function.Function; /** * @@ -53,7 +55,7 @@ public class GceUnicastHostsProvider extends AbstractComponent implements Unicas * discovery.gce.tags: The gce discovery can filter machines to include in the 
cluster based on tags. */ public static final Setting> TAGS_SETTING = - Setting.listSetting("discovery.gce.tags", Collections.emptyList(), s -> s, false, Setting.Scope.CLUSTER); + Setting.listSetting("discovery.gce.tags", Collections.emptyList(), Function.identity(), Property.NodeScope); static final class Status { private static final String TERMINATED = "TERMINATED"; diff --git a/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java b/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java index ab50d44a5c7..cf5a0cf41d7 100644 --- a/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java +++ b/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -71,9 +72,12 @@ import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField; public class AttachmentMapper extends FieldMapper { private static ESLogger logger = ESLoggerFactory.getLogger("mapper.attachment"); - public static final Setting INDEX_ATTACHMENT_IGNORE_ERRORS_SETTING = Setting.boolSetting("index.mapping.attachment.ignore_errors", true, false, Setting.Scope.INDEX); - public static final Setting INDEX_ATTACHMENT_DETECT_LANGUAGE_SETTING = Setting.boolSetting("index.mapping.attachment.detect_language", false, false, Setting.Scope.INDEX); - public static final Setting INDEX_ATTACHMENT_INDEXED_CHARS_SETTING = Setting.intSetting("index.mapping.attachment.indexed_chars", 100000, false, Setting.Scope.INDEX); + public static final Setting INDEX_ATTACHMENT_IGNORE_ERRORS_SETTING = + Setting.boolSetting("index.mapping.attachment.ignore_errors", true, Property.IndexScope); + public static final Setting INDEX_ATTACHMENT_DETECT_LANGUAGE_SETTING = + Setting.boolSetting("index.mapping.attachment.detect_language", false, Property.IndexScope); + public static final Setting INDEX_ATTACHMENT_INDEXED_CHARS_SETTING = + Setting.intSetting("index.mapping.attachment.indexed_chars", 100000, Property.IndexScope); public static final String CONTENT_TYPE = "attachment"; diff --git a/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/StandaloneRunner.java b/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/StandaloneRunner.java deleted file mode 100644 index 03c6e65047a..00000000000 --- a/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/StandaloneRunner.java +++ /dev/null @@ -1,189 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.mapper.attachments; - -import org.apache.commons.cli.CommandLine; -import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.cli.CliTool; -import org.elasticsearch.common.cli.CliToolConfig; -import org.elasticsearch.common.cli.Terminal; -import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.common.io.PathUtils; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.env.Environment; -import org.elasticsearch.index.MapperTestUtils; -import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.DocumentMapperParser; -import org.elasticsearch.index.mapper.ParseContext; - -import java.io.IOException; -import java.io.InputStream; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.Locale; - -import static org.elasticsearch.common.cli.CliToolConfig.Builder.cmd; -import static org.elasticsearch.common.cli.CliToolConfig.Builder.option; -import static org.elasticsearch.common.io.Streams.copy; -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.mapper.attachments.AttachmentUnitTestCase.getIndicesModuleWithRegisteredAttachmentMapper; -import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath; - -/** - * This class provides a simple main class which can be used to test what is extracted from a given binary file. 
- * You can run it using - * -u file://URL/TO/YOUR/DOC - * --size set extracted size (default to mapper attachment size) - * BASE64 encoded binary - * - * Example: - * StandaloneRunner BASE64Text - * StandaloneRunner -u /tmp/mydoc.pdf - * StandaloneRunner -u /tmp/mydoc.pdf --size 1000000 - */ -@SuppressForbidden(reason = "commandline tool") -public class StandaloneRunner extends CliTool { - - private static final CliToolConfig CONFIG = CliToolConfig.config("tika", StandaloneRunner.class) - .cmds(TikaRunner.CMD) - .build(); - - static { - System.setProperty("es.path.home", "/tmp"); - } - - static class TikaRunner extends Command { - private static final String NAME = "tika"; - private final String url; - private final Integer size; - private final String base64text; - private final DocumentMapper docMapper; - - private static final CliToolConfig.Cmd CMD = cmd(NAME, TikaRunner.class) - .options(option("u", "url").required(false).hasArg(false)) - .options(option("t", "size").required(false).hasArg(false)) - .build(); - - protected TikaRunner(Terminal terminal, String url, Integer size, String base64text) throws IOException { - super(terminal); - this.size = size; - this.url = url; - this.base64text = base64text; - DocumentMapperParser mapperParser = MapperTestUtils.newMapperService(PathUtils.get("."), Settings.EMPTY, getIndicesModuleWithRegisteredAttachmentMapper()).documentMapperParser(); // use CWD b/c it won't be used - - String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/attachment/test/standalone/standalone-mapping.json"); - docMapper = mapperParser.parse("person", new CompressedXContent(mapping)); - } - - @Override - public ExitStatus execute(Settings settings, Environment env) throws Exception { - XContentBuilder builder = jsonBuilder().startObject().field("file").startObject(); - - if (base64text != null) { - // If base64 is provided - builder.field("_content", base64text); - } else { - // A file is provided - byte[] bytes = copyToBytes(PathUtils.get(url)); - builder.field("_content", bytes); - } - - if (size >= 0) { - builder.field("_indexed_chars", size); - } - - BytesReference json = builder.endObject().endObject().bytes(); - - ParseContext.Document doc = docMapper.parse("person", "person", "1", json).rootDoc(); - - terminal.println("## Extracted text"); - terminal.println("--------------------- BEGIN -----------------------"); - terminal.println(doc.get("file.content")); - terminal.println("---------------------- END ------------------------"); - terminal.println("## Metadata"); - printMetadataContent(doc, AttachmentMapper.FieldNames.AUTHOR); - printMetadataContent(doc, AttachmentMapper.FieldNames.CONTENT_LENGTH); - printMetadataContent(doc, AttachmentMapper.FieldNames.CONTENT_TYPE); - printMetadataContent(doc, AttachmentMapper.FieldNames.DATE); - printMetadataContent(doc, AttachmentMapper.FieldNames.KEYWORDS); - printMetadataContent(doc, AttachmentMapper.FieldNames.LANGUAGE); - printMetadataContent(doc, AttachmentMapper.FieldNames.NAME); - printMetadataContent(doc, AttachmentMapper.FieldNames.TITLE); - - return ExitStatus.OK; - } - - private void printMetadataContent(ParseContext.Document doc, String field) { - terminal.println("- " + field + ":" + doc.get(docMapper.mappers().getMapper("file." 
+ field).fieldType().name())); - } - - public static byte[] copyToBytes(Path path) throws IOException { - try (InputStream is = Files.newInputStream(path); - BytesStreamOutput out = new BytesStreamOutput()) { - copy(is, out); - return out.bytes().toBytes(); - } - } - - public static Command parse(Terminal terminal, CommandLine cli) throws IOException { - String url = cli.getOptionValue("u"); - String base64text = null; - String sSize = cli.getOptionValue("size"); - Integer size = sSize != null ? Integer.parseInt(sSize) : -1; - if (url == null && cli.getArgs().length == 0) { - return exitCmd(ExitStatus.USAGE, terminal, "url or BASE64 content should be provided (type -h for help)"); - } - if (url == null) { - if (cli.getArgs().length == 0) { - return exitCmd(ExitStatus.USAGE, terminal, "url or BASE64 content should be provided (type -h for help)"); - } - base64text = cli.getArgs()[0]; - } else { - if (cli.getArgs().length == 1) { - return exitCmd(ExitStatus.USAGE, terminal, "url or BASE64 content should be provided. Not both. (type -h for help)"); - } - } - return new TikaRunner(terminal, url, size, base64text); - } - } - - public StandaloneRunner() { - super(CONFIG); - } - - - public static void main(String[] args) throws Exception { - StandaloneRunner pluginManager = new StandaloneRunner(); - pluginManager.execute(args); - } - - @Override - protected Command parse(String cmdName, CommandLine cli) throws Exception { - switch (cmdName.toLowerCase(Locale.ROOT)) { - case TikaRunner.NAME: return TikaRunner.parse(terminal, cli); - default: - assert false : "can't get here as cmd name is validated before this method is called"; - return exitCmd(ExitStatus.CODE_ERROR); - } - } -} diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java index 1c3ece1adad..01d66c177a2 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java @@ -24,6 +24,7 @@ import com.microsoft.azure.storage.StorageException; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; @@ -31,7 +32,6 @@ import java.io.InputStream; import java.io.OutputStream; import java.net.URISyntaxException; import java.util.Map; -import java.util.function.Function; /** * Azure Storage Service interface @@ -41,13 +41,20 @@ public interface AzureStorageService { final class Storage { public static final String PREFIX = "cloud.azure.storage."; - public static final Setting TIMEOUT_SETTING = Setting.timeSetting("cloud.azure.storage.timeout", TimeValue.timeValueSeconds(-1), false, Setting.Scope.CLUSTER); - public static final Setting ACCOUNT_SETTING = Setting.simpleString("repositories.azure.account", false, Setting.Scope.CLUSTER); - public static final Setting CONTAINER_SETTING = Setting.simpleString("repositories.azure.container", false, Setting.Scope.CLUSTER); - public static final Setting BASE_PATH_SETTING = Setting.simpleString("repositories.azure.base_path", false, Setting.Scope.CLUSTER); - public static final Setting LOCATION_MODE_SETTING = Setting.simpleString("repositories.azure.location_mode", false, 
Setting.Scope.CLUSTER); - public static final Setting CHUNK_SIZE_SETTING = Setting.byteSizeSetting("repositories.azure.chunk_size", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER); - public static final Setting COMPRESS_SETTING = Setting.boolSetting("repositories.azure.compress", false, false, Setting.Scope.CLUSTER); + public static final Setting TIMEOUT_SETTING = + Setting.timeSetting("cloud.azure.storage.timeout", TimeValue.timeValueMinutes(-1), Property.NodeScope); + public static final Setting ACCOUNT_SETTING = + Setting.simpleString("repositories.azure.account", Property.NodeScope, Property.Filtered); + public static final Setting CONTAINER_SETTING = + Setting.simpleString("repositories.azure.container", Property.NodeScope); + public static final Setting BASE_PATH_SETTING = + Setting.simpleString("repositories.azure.base_path", Property.NodeScope); + public static final Setting LOCATION_MODE_SETTING = + Setting.simpleString("repositories.azure.location_mode", Property.NodeScope); + public static final Setting CHUNK_SIZE_SETTING = + Setting.byteSizeSetting("repositories.azure.chunk_size", new ByteSizeValue(-1), Property.NodeScope); + public static final Setting COMPRESS_SETTING = + Setting.boolSetting("repositories.azure.compress", false, Property.NodeScope); } boolean doesContainerExist(String account, LocationMode mode, String container); @@ -62,13 +69,17 @@ public interface AzureStorageService { void deleteBlob(String account, LocationMode mode, String container, String blob) throws URISyntaxException, StorageException; - InputStream getInputStream(String account, LocationMode mode, String container, String blob) throws URISyntaxException, StorageException; + InputStream getInputStream(String account, LocationMode mode, String container, String blob) + throws URISyntaxException, StorageException; - OutputStream getOutputStream(String account, LocationMode mode, String container, String blob) throws URISyntaxException, StorageException; + OutputStream getOutputStream(String account, LocationMode mode, String container, String blob) + throws URISyntaxException, StorageException; - Map listBlobsByPrefix(String account, LocationMode mode, String container, String keyPath, String prefix) throws URISyntaxException, StorageException; + Map listBlobsByPrefix(String account, LocationMode mode, String container, String keyPath, String prefix) + throws URISyntaxException, StorageException; - void moveBlob(String account, LocationMode mode, String container, String sourceBlob, String targetBlob) throws URISyntaxException, StorageException; + void moveBlob(String account, LocationMode mode, String container, String sourceBlob, String targetBlob) + throws URISyntaxException, StorageException; AzureStorageService start(); } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java index cdc6d74edb0..497b0e3753a 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java @@ -169,7 +169,7 @@ public class AzureStorageServiceImpl extends AbstractLifecycleComponent Storage.TIMEOUT_SETTING.get(s).toString(), (s) -> Setting.parseTimeValue(s, TimeValue.timeValueSeconds(-1), TIMEOUT_KEY.toString()), - false, - Setting.Scope.CLUSTER); - private static final Setting 
ACCOUNT_SETTING = Setting.adfixKeySetting(Storage.PREFIX, ACCOUNT_SUFFIX, "", Function.identity(), false, Setting.Scope.CLUSTER); - private static final Setting KEY_SETTING = Setting.adfixKeySetting(Storage.PREFIX, KEY_SUFFIX, "", Function.identity(), false, Setting.Scope.CLUSTER); - private static final Setting DEFAULT_SETTING = Setting.adfixKeySetting(Storage.PREFIX, DEFAULT_SUFFIX, "false", Boolean::valueOf, false, Setting.Scope.CLUSTER); + Setting.Property.NodeScope); + private static final Setting ACCOUNT_SETTING = + Setting.adfixKeySetting(Storage.PREFIX, ACCOUNT_SUFFIX, "", Function.identity(), Setting.Property.NodeScope); + private static final Setting KEY_SETTING = + Setting.adfixKeySetting(Storage.PREFIX, KEY_SUFFIX, "", Function.identity(), Setting.Property.NodeScope); + private static final Setting DEFAULT_SETTING = + Setting.adfixKeySetting(Storage.PREFIX, DEFAULT_SUFFIX, "false", Boolean::valueOf, Setting.Property.NodeScope); private final String name; @@ -110,7 +112,7 @@ public final class AzureStorageSettings { } private static List createStorageSettings(Settings settings) { - Setting storageGroupSetting = Setting.groupSetting(Storage.PREFIX, false, Setting.Scope.CLUSTER); + Setting storageGroupSetting = Setting.groupSetting(Storage.PREFIX, Setting.Property.NodeScope); // ignore global timeout which has the same prefix but does not belong to any group Settings groups = storageGroupSetting.get(settings.filter((k) -> k.equals(Storage.TIMEOUT_SETTING.getKey()) == false)); List storageSettings = new ArrayList<>(); diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java b/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java index 616b150f954..3ce043500ae 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java @@ -74,9 +74,9 @@ public class AzureRepositoryPlugin extends Plugin { module.registerSetting(AzureStorageService.Storage.BASE_PATH_SETTING); module.registerSetting(AzureStorageService.Storage.CHUNK_SIZE_SETTING); module.registerSetting(AzureStorageService.Storage.LOCATION_MODE_SETTING); - // Cloud storage API settings needed to be hidden + + // Cloud storage API settings using a pattern needed to be hidden module.registerSettingsFilter(AzureStorageService.Storage.PREFIX + "*.account"); module.registerSettingsFilter(AzureStorageService.Storage.PREFIX + "*.key"); - module.registerSettingsFilter(AzureStorageService.Storage.ACCOUNT_SETTING.getKey()); } } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index f2773bccbbd..66db57fdd92 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.unit.ByteSizeUnit; 
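// Sketch of the per-account key layout implied by the AzureStorageSettings hunks above:
// Storage.PREFIX is "cloud.azure.storage." and the plugin filters "*.account" and "*.key",
// so each named account groups its credentials under that prefix. The account name and
// values are illustrative; the ".default" suffix is an assumption based on DEFAULT_SUFFIX.
import org.elasticsearch.common.settings.Settings;

class AzureMultiAccountSketch {
    static final Settings SETTINGS = Settings.builder()
        .put("cloud.azure.storage.my_account.account", "my_storage_account")  // filtered
        .put("cloud.azure.storage.my_account.key", "base64-encoded-key")      // filtered
        .put("cloud.azure.storage.my_account.default", "true")                // assumed suffix
        .build();
}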
import org.elasticsearch.common.unit.ByteSizeValue; @@ -67,12 +68,14 @@ public class AzureRepository extends BlobStoreRepository { public final static String TYPE = "azure"; public static final class Repository { - public static final Setting ACCOUNT_SETTING = Setting.simpleString("account", false, Setting.Scope.CLUSTER); - public static final Setting CONTAINER_SETTING = new Setting<>("container", "elasticsearch-snapshots", Function.identity(), false, Setting.Scope.CLUSTER); - public static final Setting BASE_PATH_SETTING = Setting.simpleString("base_path", false, Setting.Scope.CLUSTER); - public static final Setting LOCATION_MODE_SETTING = Setting.simpleString("location_mode", false, Setting.Scope.CLUSTER); - public static final Setting CHUNK_SIZE_SETTING = Setting.byteSizeSetting("chunk_size", MAX_CHUNK_SIZE, false, Setting.Scope.CLUSTER); - public static final Setting COMPRESS_SETTING = Setting.boolSetting("compress", false, false, Setting.Scope.CLUSTER); + public static final Setting ACCOUNT_SETTING = Setting.simpleString("account", Property.NodeScope); + public static final Setting CONTAINER_SETTING = + new Setting<>("container", "elasticsearch-snapshots", Function.identity(), Property.NodeScope); + public static final Setting BASE_PATH_SETTING = Setting.simpleString("base_path", Property.NodeScope); + public static final Setting LOCATION_MODE_SETTING = Setting.simpleString("location_mode", Property.NodeScope); + public static final Setting CHUNK_SIZE_SETTING = + Setting.byteSizeSetting("chunk_size", MAX_CHUNK_SIZE, Property.NodeScope); + public static final Setting COMPRESS_SETTING = Setting.boolSetting("compress", false, Property.NodeScope); } private final AzureBlobStore blobStore; diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 915a85ebdc4..8fc9e50d7f3 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -45,6 +45,7 @@ dependencies { compile 'com.google.guava:guava:16.0.1' compile 'com.google.protobuf:protobuf-java:2.5.0' compile 'commons-logging:commons-logging:1.1.3' + compile 'commons-cli:commons-cli:1.2' compile 'commons-collections:commons-collections:3.2.2' compile 'commons-configuration:commons-configuration:1.6' compile 'commons-io:commons-io:2.4' diff --git a/plugins/repository-hdfs/licenses/commons-cli-1.2.jar.sha1 b/plugins/repository-hdfs/licenses/commons-cli-1.2.jar.sha1 new file mode 100644 index 00000000000..d38d00127e8 --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-cli-1.2.jar.sha1 @@ -0,0 +1 @@ +2bf96b7aa8b611c177d329452af1dc933e14501c \ No newline at end of file diff --git a/distribution/licenses/commons-cli-LICENSE.txt b/plugins/repository-hdfs/licenses/commons-cli-LICENSE.txt similarity index 100% rename from distribution/licenses/commons-cli-LICENSE.txt rename to plugins/repository-hdfs/licenses/commons-cli-LICENSE.txt diff --git a/distribution/licenses/commons-cli-NOTICE.txt b/plugins/repository-hdfs/licenses/commons-cli-NOTICE.txt similarity index 100% rename from distribution/licenses/commons-cli-NOTICE.txt rename to plugins/repository-hdfs/licenses/commons-cli-NOTICE.txt diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsS3Service.java b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsS3Service.java index 3ccd6d7987f..427c454fa28 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsS3Service.java +++ 
b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsS3Service.java @@ -23,6 +23,7 @@ import com.amazonaws.Protocol; import com.amazonaws.services.s3.AmazonS3; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import java.util.Locale; import java.util.function.Function; @@ -38,40 +39,44 @@ public interface AwsS3Service extends LifecycleComponent { /** * cloud.aws.access_key: AWS Access key. Shared with discovery-ec2 plugin */ - Setting KEY_SETTING = Setting.simpleString("cloud.aws.access_key", false, Setting.Scope.CLUSTER); + Setting KEY_SETTING = + Setting.simpleString("cloud.aws.access_key", Property.NodeScope, Property.Filtered); /** * cloud.aws.secret_key: AWS Secret key. Shared with discovery-ec2 plugin */ - Setting SECRET_SETTING = Setting.simpleString("cloud.aws.secret_key", false, Setting.Scope.CLUSTER); + Setting SECRET_SETTING = + Setting.simpleString("cloud.aws.secret_key", Property.NodeScope, Property.Filtered); /** * cloud.aws.protocol: Protocol for AWS API: http or https. Defaults to https. Shared with discovery-ec2 plugin */ Setting PROTOCOL_SETTING = new Setting<>("cloud.aws.protocol", "https", s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), - false, Setting.Scope.CLUSTER); + Property.NodeScope); /** * cloud.aws.proxy.host: In case of proxy, define its hostname/IP. Shared with discovery-ec2 plugin */ - Setting PROXY_HOST_SETTING = Setting.simpleString("cloud.aws.proxy.host", false, Setting.Scope.CLUSTER); + Setting PROXY_HOST_SETTING = Setting.simpleString("cloud.aws.proxy.host", Property.NodeScope); /** * cloud.aws.proxy.port: In case of proxy, define its port. Defaults to 80. Shared with discovery-ec2 plugin */ - Setting PROXY_PORT_SETTING = Setting.intSetting("cloud.aws.proxy.port", 80, 0, 1<<16, false, Setting.Scope.CLUSTER); + Setting PROXY_PORT_SETTING = Setting.intSetting("cloud.aws.proxy.port", 80, 0, 1<<16, Property.NodeScope); /** * cloud.aws.proxy.username: In case of proxy with auth, define the username. Shared with discovery-ec2 plugin */ - Setting PROXY_USERNAME_SETTING = Setting.simpleString("cloud.aws.proxy.username", false, Setting.Scope.CLUSTER); + Setting PROXY_USERNAME_SETTING = Setting.simpleString("cloud.aws.proxy.username", Property.NodeScope); /** * cloud.aws.proxy.password: In case of proxy with auth, define the password. Shared with discovery-ec2 plugin */ - Setting PROXY_PASSWORD_SETTING = Setting.simpleString("cloud.aws.proxy.password", false, Setting.Scope.CLUSTER); + Setting PROXY_PASSWORD_SETTING = + Setting.simpleString("cloud.aws.proxy.password", Property.NodeScope, Property.Filtered); /** * cloud.aws.signer: If you are using an old AWS API version, you can define a Signer. Shared with discovery-ec2 plugin */ - Setting SIGNER_SETTING = Setting.simpleString("cloud.aws.signer", false, Setting.Scope.CLUSTER); + Setting SIGNER_SETTING = Setting.simpleString("cloud.aws.signer", Property.NodeScope); /** * cloud.aws.region: Region. Shared with discovery-ec2 plugin */ - Setting REGION_SETTING = new Setting<>("cloud.aws.region", "", s -> s.toLowerCase(Locale.ROOT), false, Setting.Scope.CLUSTER); + Setting REGION_SETTING = + new Setting<>("cloud.aws.region", "", s -> s.toLowerCase(Locale.ROOT), Property.NodeScope); /** * Defines specific s3 settings starting with cloud.aws.s3. 
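// The Property.Filtered flags above replace the explicit
// settingsModule.registerSettingsFilterIfMissing(...) calls deleted elsewhere in this
// diff: sensitive values are now filtered (hidden from settings-reporting APIs)
// declaratively at the Setting definition. A minimal sketch, using calls copied from
// the hunks above:
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;

class FilteredSettingSketch {
    // filtered: never rendered back to callers that dump node settings
    static final Setting<String> SECRET_SETTING =
        Setting.simpleString("cloud.aws.secret_key", Property.NodeScope, Property.Filtered);
    // unfiltered: safe to display
    static final Setting<String> SIGNER_SETTING =
        Setting.simpleString("cloud.aws.signer", Property.NodeScope);
}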
@@ -82,68 +87,70 @@ public interface AwsS3Service extends LifecycleComponent { * @see AwsS3Service#KEY_SETTING */ Setting KEY_SETTING = - new Setting<>("cloud.aws.s3.access_key", AwsS3Service.KEY_SETTING, Function.identity(), false, Setting.Scope.CLUSTER); + new Setting<>("cloud.aws.s3.access_key", AwsS3Service.KEY_SETTING, Function.identity(), + Property.NodeScope, Property.Filtered); /** * cloud.aws.s3.secret_key: AWS Secret key specific for S3 API calls. Defaults to cloud.aws.secret_key. * @see AwsS3Service#SECRET_SETTING */ Setting SECRET_SETTING = - new Setting<>("cloud.aws.s3.secret_key", AwsS3Service.SECRET_SETTING, Function.identity(), false, Setting.Scope.CLUSTER); + new Setting<>("cloud.aws.s3.secret_key", AwsS3Service.SECRET_SETTING, Function.identity(), + Property.NodeScope, Property.Filtered); /** * cloud.aws.s3.protocol: Protocol for AWS API specific for S3 API calls: http or https. Defaults to cloud.aws.protocol. * @see AwsS3Service#PROTOCOL_SETTING */ Setting PROTOCOL_SETTING = - new Setting<>("cloud.aws.s3.protocol", AwsS3Service.PROTOCOL_SETTING, s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), false, - Setting.Scope.CLUSTER); + new Setting<>("cloud.aws.s3.protocol", AwsS3Service.PROTOCOL_SETTING, s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), + Property.NodeScope); /** * cloud.aws.s3.proxy.host: In case of proxy, define its hostname/IP specific for S3 API calls. Defaults to cloud.aws.proxy.host. * @see AwsS3Service#PROXY_HOST_SETTING */ Setting PROXY_HOST_SETTING = - new Setting<>("cloud.aws.s3.proxy.host", AwsS3Service.PROXY_HOST_SETTING, Function.identity(), false, Setting.Scope.CLUSTER); + new Setting<>("cloud.aws.s3.proxy.host", AwsS3Service.PROXY_HOST_SETTING, Function.identity(), + Property.NodeScope); /** * cloud.aws.s3.proxy.port: In case of proxy, define its port specific for S3 API calls. Defaults to cloud.aws.proxy.port. * @see AwsS3Service#PROXY_PORT_SETTING */ Setting PROXY_PORT_SETTING = new Setting<>("cloud.aws.s3.proxy.port", AwsS3Service.PROXY_PORT_SETTING, - s -> Setting.parseInt(s, 0, 1<<16, "cloud.aws.s3.proxy.port"), false, Setting.Scope.CLUSTER); + s -> Setting.parseInt(s, 0, 1<<16, "cloud.aws.s3.proxy.port"), Property.NodeScope); /** * cloud.aws.s3.proxy.username: In case of proxy with auth, define the username specific for S3 API calls. * Defaults to cloud.aws.proxy.username. * @see AwsS3Service#PROXY_USERNAME_SETTING */ Setting PROXY_USERNAME_SETTING = - new Setting<>("cloud.aws.s3.proxy.username", AwsS3Service.PROXY_USERNAME_SETTING, Function.identity(), false, - Setting.Scope.CLUSTER); + new Setting<>("cloud.aws.s3.proxy.username", AwsS3Service.PROXY_USERNAME_SETTING, Function.identity(), + Property.NodeScope); /** * cloud.aws.s3.proxy.password: In case of proxy with auth, define the password specific for S3 API calls. * Defaults to cloud.aws.proxy.password. * @see AwsS3Service#PROXY_PASSWORD_SETTING */ Setting PROXY_PASSWORD_SETTING = - new Setting<>("cloud.aws.s3.proxy.password", AwsS3Service.PROXY_PASSWORD_SETTING, Function.identity(), false, - Setting.Scope.CLUSTER); + new Setting<>("cloud.aws.s3.proxy.password", AwsS3Service.PROXY_PASSWORD_SETTING, Function.identity(), + Property.NodeScope, Property.Filtered); /** * cloud.aws.s3.signer: If you are using an old AWS API version, you can define a Signer. Specific for S3 API calls. * Defaults to cloud.aws.signer. 
* @see AwsS3Service#SIGNER_SETTING */ Setting SIGNER_SETTING = - new Setting<>("cloud.aws.s3.signer", AwsS3Service.SIGNER_SETTING, Function.identity(), false, Setting.Scope.CLUSTER); + new Setting<>("cloud.aws.s3.signer", AwsS3Service.SIGNER_SETTING, Function.identity(), Property.NodeScope); /** * cloud.aws.s3.region: Region specific for S3 API calls. Defaults to cloud.aws.region. * @see AwsS3Service#REGION_SETTING */ Setting REGION_SETTING = - new Setting<>("cloud.aws.s3.region", AwsS3Service.REGION_SETTING, s -> s.toLowerCase(Locale.ROOT), false, - Setting.Scope.CLUSTER); + new Setting<>("cloud.aws.s3.region", AwsS3Service.REGION_SETTING, s -> s.toLowerCase(Locale.ROOT), + Property.NodeScope); /** * cloud.aws.s3.endpoint: Endpoint. If not set, endpoint will be guessed based on region setting. */ - Setting ENDPOINT_SETTING = - Setting.simpleString("cloud.aws.s3.endpoint", false, Setting.Scope.CLUSTER); + Setting ENDPOINT_SETTING = Setting.simpleString("cloud.aws.s3.endpoint", Property.NodeScope); } AmazonS3 client(String endpoint, Protocol protocol, String region, String account, String key, Integer maxRetries); diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsSigner.java b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsSigner.java index c94491696c0..5c02671e5e9 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsSigner.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsSigner.java @@ -64,7 +64,7 @@ public class AwsSigner { try { validateSignerType(signer, endpoint); } catch (IllegalArgumentException e) { - logger.warn(e.getMessage()); + logger.warn("{}", e.getMessage()); } configuration.setSignerOverride(signer); diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/plugin/repository/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/elasticsearch/plugin/repository/s3/S3RepositoryPlugin.java index 5d21bb4e2ac..d07d8c174c5 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/plugin/repository/s3/S3RepositoryPlugin.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/plugin/repository/s3/S3RepositoryPlugin.java @@ -144,16 +144,6 @@ public class S3RepositoryPlugin extends Plugin { settingsModule.registerSetting(S3Repository.Repository.STORAGE_CLASS_SETTING); settingsModule.registerSetting(S3Repository.Repository.CANNED_ACL_SETTING); settingsModule.registerSetting(S3Repository.Repository.BASE_PATH_SETTING); - - // Filter global settings - settingsModule.registerSettingsFilterIfMissing(AwsS3Service.KEY_SETTING.getKey()); - settingsModule.registerSettingsFilterIfMissing(AwsS3Service.SECRET_SETTING.getKey()); - settingsModule.registerSettingsFilterIfMissing(AwsS3Service.PROXY_PASSWORD_SETTING.getKey()); - settingsModule.registerSettingsFilterIfMissing(AwsS3Service.CLOUD_S3.KEY_SETTING.getKey()); - settingsModule.registerSettingsFilterIfMissing(AwsS3Service.CLOUD_S3.SECRET_SETTING.getKey()); - settingsModule.registerSettingsFilterIfMissing(AwsS3Service.CLOUD_S3.PROXY_PASSWORD_SETTING.getKey()); - settingsModule.registerSettingsFilter(S3Repository.Repository.KEY_SETTING.getKey()); - settingsModule.registerSettingsFilter(S3Repository.Repository.SECRET_SETTING.getKey()); } /** diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index 3edead0765e..fde774a6b92 100644 --- 
a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.snapshots.IndexShardRepository; @@ -65,70 +66,78 @@ public class S3Repository extends BlobStoreRepository { * repositories.s3.access_key: AWS Access key specific for all S3 Repositories API calls. Defaults to cloud.aws.s3.access_key. * @see CLOUD_S3#KEY_SETTING */ - Setting KEY_SETTING = new Setting<>("repositories.s3.access_key", CLOUD_S3.KEY_SETTING, Function.identity(), false, Setting.Scope.CLUSTER); + Setting KEY_SETTING = + new Setting<>("repositories.s3.access_key", CLOUD_S3.KEY_SETTING, Function.identity(), Property.NodeScope); /** * repositories.s3.secret_key: AWS Secret key specific for all S3 Repositories API calls. Defaults to cloud.aws.s3.secret_key. * @see CLOUD_S3#SECRET_SETTING */ - Setting SECRET_SETTING = new Setting<>("repositories.s3.secret_key", CLOUD_S3.SECRET_SETTING, Function.identity(), false, Setting.Scope.CLUSTER); + Setting SECRET_SETTING = + new Setting<>("repositories.s3.secret_key", CLOUD_S3.SECRET_SETTING, Function.identity(), Property.NodeScope); /** * repositories.s3.region: Region specific for all S3 Repositories API calls. Defaults to cloud.aws.s3.region. * @see CLOUD_S3#REGION_SETTING */ - Setting REGION_SETTING = new Setting<>("repositories.s3.region", CLOUD_S3.REGION_SETTING, s -> s.toLowerCase(Locale.ROOT), false, Setting.Scope.CLUSTER); + Setting REGION_SETTING = + new Setting<>("repositories.s3.region", CLOUD_S3.REGION_SETTING, s -> s.toLowerCase(Locale.ROOT), Property.NodeScope); /** * repositories.s3.endpoint: Endpoint specific for all S3 Repositories API calls. Defaults to cloud.aws.s3.endpoint. * @see CLOUD_S3#ENDPOINT_SETTING */ - Setting ENDPOINT_SETTING = new Setting<>("repositories.s3.endpoint", CLOUD_S3.ENDPOINT_SETTING, s -> s.toLowerCase(Locale.ROOT), false, Setting.Scope.CLUSTER); + Setting ENDPOINT_SETTING = + new Setting<>("repositories.s3.endpoint", CLOUD_S3.ENDPOINT_SETTING, s -> s.toLowerCase(Locale.ROOT), Property.NodeScope); /** * repositories.s3.protocol: Protocol specific for all S3 Repositories API calls. Defaults to cloud.aws.s3.protocol. * @see CLOUD_S3#PROTOCOL_SETTING */ - Setting PROTOCOL_SETTING = new Setting<>("repositories.s3.protocol", CLOUD_S3.PROTOCOL_SETTING, s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), false, Setting.Scope.CLUSTER); + Setting PROTOCOL_SETTING = + new Setting<>("repositories.s3.protocol", CLOUD_S3.PROTOCOL_SETTING, s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), Property.NodeScope); /** * repositories.s3.bucket: The name of the bucket to be used for snapshots. */ - Setting BUCKET_SETTING = Setting.simpleString("repositories.s3.bucket", false, Setting.Scope.CLUSTER); + Setting BUCKET_SETTING = Setting.simpleString("repositories.s3.bucket", Property.NodeScope); /** * repositories.s3.server_side_encryption: When set to true files are encrypted on server side using AES256 algorithm. * Defaults to false. 
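// Sketch of the fallback pattern used throughout S3Repository: a repositories.s3.* setting
// takes another Setting as its default, so an unset repository-level key falls back to the
// corresponding cloud.aws.s3.* value. The constructor call mirrors the endpoint lines above.
import java.util.Locale;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;

class FallbackSettingSketch {
    static final Setting<String> CLOUD_ENDPOINT =
        Setting.simpleString("cloud.aws.s3.endpoint", Property.NodeScope);
    static final Setting<String> REPOSITORY_ENDPOINT =
        new Setting<>("repositories.s3.endpoint", CLOUD_ENDPOINT, s -> s.toLowerCase(Locale.ROOT), Property.NodeScope);
}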
*/ - Setting SERVER_SIDE_ENCRYPTION_SETTING = Setting.boolSetting("repositories.s3.server_side_encryption", false, false, Setting.Scope.CLUSTER); + Setting SERVER_SIDE_ENCRYPTION_SETTING = + Setting.boolSetting("repositories.s3.server_side_encryption", false, Property.NodeScope); /** * repositories.s3.buffer_size: Minimum threshold below which the chunk is uploaded using a single request. Beyond this threshold, * the S3 repository will use the AWS Multipart Upload API to split the chunk into several parts, each of buffer_size length, and * to upload each part in its own request. Note that setting a buffer size lower than 5mb is not allowed since it will prevent the * use of the Multipart API and may result in upload errors. Defaults to 5mb. */ - Setting BUFFER_SIZE_SETTING = Setting.byteSizeSetting("repositories.s3.buffer_size", S3BlobStore.MIN_BUFFER_SIZE, false, Setting.Scope.CLUSTER); + Setting BUFFER_SIZE_SETTING = + Setting.byteSizeSetting("repositories.s3.buffer_size", S3BlobStore.MIN_BUFFER_SIZE, Property.NodeScope); /** * repositories.s3.max_retries: Number of retries in case of S3 errors. Defaults to 3. */ - Setting MAX_RETRIES_SETTING = Setting.intSetting("repositories.s3.max_retries", 3, false, Setting.Scope.CLUSTER); + Setting MAX_RETRIES_SETTING = Setting.intSetting("repositories.s3.max_retries", 3, Property.NodeScope); /** * repositories.s3.chunk_size: Big files can be broken down into chunks during snapshotting if needed. Defaults to 100m. */ - Setting CHUNK_SIZE_SETTING = Setting.byteSizeSetting("repositories.s3.chunk_size", new ByteSizeValue(100, ByteSizeUnit.MB), false, Setting.Scope.CLUSTER); + Setting CHUNK_SIZE_SETTING = + Setting.byteSizeSetting("repositories.s3.chunk_size", new ByteSizeValue(100, ByteSizeUnit.MB), Property.NodeScope); /** * repositories.s3.compress: When set to true metadata files are stored in compressed format. This setting doesn’t affect index * files that are already compressed by default. Defaults to false. */ - Setting COMPRESS_SETTING = Setting.boolSetting("repositories.s3.compress", false, false, Setting.Scope.CLUSTER); + Setting COMPRESS_SETTING = Setting.boolSetting("repositories.s3.compress", false, Property.NodeScope); /** * repositories.s3.storage_class: Sets the S3 storage class type for the backup files. Values may be standard, reduced_redundancy, * standard_ia. Defaults to standard. */ - Setting STORAGE_CLASS_SETTING = Setting.simpleString("repositories.s3.storage_class", false, Setting.Scope.CLUSTER); + Setting STORAGE_CLASS_SETTING = Setting.simpleString("repositories.s3.storage_class", Property.NodeScope); /** * repositories.s3.canned_acl: The S3 repository supports all S3 canned ACLs: private, public-read, public-read-write, * authenticated-read, log-delivery-write, bucket-owner-read, bucket-owner-full-control. Defaults to private. */ - Setting CANNED_ACL_SETTING = Setting.simpleString("repositories.s3.canned_acl", false, Setting.Scope.CLUSTER); + Setting CANNED_ACL_SETTING = Setting.simpleString("repositories.s3.canned_acl", Property.NodeScope); /** * repositories.s3.base_path: Specifies the path within bucket to repository data. Defaults to root directory.
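// Sketch: how the unprefixed per-repository keys defined in the Repository class below
// might be supplied when a single S3 repository is registered. Key names come from this
// diff; the values and the builder usage are illustrative only.
import org.elasticsearch.common.settings.Settings;

class S3RepositorySettingsSketch {
    static final Settings REPO_SETTINGS = Settings.builder()
        .put("bucket", "my-snapshot-bucket")    // Repository BUCKET_SETTING
        .put("base_path", "prod/cluster1")      // Repository BASE_PATH_SETTING
        .put("server_side_encryption", true)    // Repository SERVER_SIDE_ENCRYPTION_SETTING
        .build();
}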
*/ - Setting BASE_PATH_SETTING = Setting.simpleString("repositories.s3.base_path", false, Setting.Scope.CLUSTER); + Setting BASE_PATH_SETTING = Setting.simpleString("repositories.s3.base_path", Property.NodeScope); } /** @@ -140,72 +149,75 @@ public class S3Repository extends BlobStoreRepository { * access_key * @see Repositories#KEY_SETTING */ - Setting KEY_SETTING = Setting.simpleString("access_key", false, Setting.Scope.CLUSTER); + Setting KEY_SETTING = Setting.simpleString("access_key", Property.NodeScope, Property.Filtered); /** * secret_key * @see Repositories#SECRET_SETTING */ - Setting SECRET_SETTING = Setting.simpleString("secret_key", false, Setting.Scope.CLUSTER); + Setting SECRET_SETTING = Setting.simpleString("secret_key", Property.NodeScope, Property.Filtered); /** * bucket * @see Repositories#BUCKET_SETTING */ - Setting BUCKET_SETTING = Setting.simpleString("bucket", false, Setting.Scope.CLUSTER); + Setting BUCKET_SETTING = Setting.simpleString("bucket", Property.NodeScope); /** * endpoint * @see Repositories#ENDPOINT_SETTING */ - Setting ENDPOINT_SETTING = Setting.simpleString("endpoint", false, Setting.Scope.CLUSTER); + Setting ENDPOINT_SETTING = Setting.simpleString("endpoint", Property.NodeScope); /** * protocol * @see Repositories#PROTOCOL_SETTING */ - Setting PROTOCOL_SETTING = new Setting<>("protocol", "https", s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), false, Setting.Scope.CLUSTER); + Setting PROTOCOL_SETTING = + new Setting<>("protocol", "https", s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), Property.NodeScope); /** * region * @see Repositories#REGION_SETTING */ - Setting REGION_SETTING = new Setting<>("region", "", s -> s.toLowerCase(Locale.ROOT), false, Setting.Scope.CLUSTER); + Setting REGION_SETTING = new Setting<>("region", "", s -> s.toLowerCase(Locale.ROOT), Property.NodeScope); /** * server_side_encryption * @see Repositories#SERVER_SIDE_ENCRYPTION_SETTING */ - Setting SERVER_SIDE_ENCRYPTION_SETTING = Setting.boolSetting("server_side_encryption", false, false, Setting.Scope.CLUSTER); + Setting SERVER_SIDE_ENCRYPTION_SETTING = + Setting.boolSetting("server_side_encryption", false, Property.NodeScope); /** * buffer_size * @see Repositories#BUFFER_SIZE_SETTING */ - Setting BUFFER_SIZE_SETTING = Setting.byteSizeSetting("buffer_size", S3BlobStore.MIN_BUFFER_SIZE, false, Setting.Scope.CLUSTER); + Setting BUFFER_SIZE_SETTING = + Setting.byteSizeSetting("buffer_size", S3BlobStore.MIN_BUFFER_SIZE, Property.NodeScope); /** * max_retries * @see Repositories#MAX_RETRIES_SETTING */ - Setting MAX_RETRIES_SETTING = Setting.intSetting("max_retries", 3, false, Setting.Scope.CLUSTER); + Setting MAX_RETRIES_SETTING = Setting.intSetting("max_retries", 3, Property.NodeScope); /** * chunk_size * @see Repositories#CHUNK_SIZE_SETTING */ - Setting CHUNK_SIZE_SETTING = Setting.byteSizeSetting("chunk_size", "-1", false, Setting.Scope.CLUSTER); + Setting CHUNK_SIZE_SETTING = Setting.byteSizeSetting("chunk_size", "-1", Property.NodeScope); /** * compress * @see Repositories#COMPRESS_SETTING */ - Setting COMPRESS_SETTING = Setting.boolSetting("compress", false, false, Setting.Scope.CLUSTER); + Setting COMPRESS_SETTING = Setting.boolSetting("compress", false, Property.NodeScope); /** * storage_class * @see Repositories#STORAGE_CLASS_SETTING */ - Setting STORAGE_CLASS_SETTING = Setting.simpleString("storage_class", false, Setting.Scope.CLUSTER); + Setting STORAGE_CLASS_SETTING = Setting.simpleString("storage_class", Property.NodeScope); /** * canned_acl * @see 
Repositories#CANNED_ACL_SETTING */ - Setting CANNED_ACL_SETTING = Setting.simpleString("canned_acl", false, Setting.Scope.CLUSTER); + Setting CANNED_ACL_SETTING = Setting.simpleString("canned_acl", Property.NodeScope); /** * base_path * @see Repositories#BASE_PATH_SETTING */ - Setting BASE_PATH_SETTING = Setting.simpleString("base_path", false, Setting.Scope.CLUSTER); + Setting BASE_PATH_SETTING = Setting.simpleString("base_path", Property.NodeScope); } private final S3BlobStore blobStore; diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/BootstrapCliParserTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/BootstrapCliParserTests.java index c172999e7cb..fc7504fc97f 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/BootstrapCliParserTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/BootstrapCliParserTests.java @@ -19,18 +19,6 @@ package org.elasticsearch.bootstrap; -import org.elasticsearch.Build; -import org.elasticsearch.Version; -import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.cli.CliTool.ExitStatus; -import org.elasticsearch.common.cli.CliToolTestCase; -import org.elasticsearch.common.cli.MockTerminal; -import org.elasticsearch.common.cli.UserError; -import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.monitor.jvm.JvmInfo; -import org.junit.After; -import org.junit.Before; - import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -38,18 +26,27 @@ import java.util.List; import java.util.Locale; import java.util.Map; -import static org.elasticsearch.common.cli.CliTool.ExitStatus.OK; -import static org.elasticsearch.common.cli.CliTool.ExitStatus.OK_AND_EXIT; -import static org.elasticsearch.common.cli.CliTool.ExitStatus.USAGE; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.hasItem; +import joptsimple.OptionException; +import org.elasticsearch.Build; +import org.elasticsearch.Version; +import org.elasticsearch.cli.Command; +import org.elasticsearch.cli.CommandTestCase; +import org.elasticsearch.cli.UserError; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.monitor.jvm.JvmInfo; +import org.junit.After; +import org.junit.Before; + import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.nullValue; @SuppressForbidden(reason = "modifies system properties intentionally") -public class BootstrapCliParserTests extends CliToolTestCase { +public class BootstrapCliParserTests extends CommandTestCase { + + @Override + protected Command newCommand() { + return new BootstrapCliParser(); + } - private MockTerminal terminal = new MockTerminal(); private List propertiesToClear = new ArrayList<>(); private Map properties; @@ -67,195 +64,93 @@ public class BootstrapCliParserTests extends CliToolTestCase { assertEquals("properties leaked", properties, new HashMap<>(System.getProperties())); } - public void testThatVersionIsReturned() throws Exception { - BootstrapCLIParser parser = new BootstrapCLIParser(terminal); - ExitStatus status = parser.execute(args("version")); - assertStatus(status, OK_AND_EXIT); - - String output = terminal.getOutput(); - assertTrue(output, output.contains(Version.CURRENT.toString())); - assertTrue(output, output.contains(Build.CURRENT.shortHash())); - assertTrue(output, output.contains(Build.CURRENT.date())); - assertTrue(output, output.contains(JvmInfo.jvmInfo().version())); + void assertShouldRun(boolean shouldRun) { + 
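// Note: `command`, `terminal`, and execute(...) used by the rewritten tests are assumed to
// be inherited from the new CommandTestCase base class imported above; they replace the
// BootstrapCLIParser/MockTerminal pair that each deleted test built by hand.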
BootstrapCliParser parser = (BootstrapCliParser)command; + assertEquals(shouldRun, parser.shouldRun()); } - public void testThatVersionIsReturnedAsStartParameter() throws Exception { - BootstrapCLIParser parser = new BootstrapCLIParser(terminal); - ExitStatus status = parser.execute(args("start -V")); - assertStatus(status, OK_AND_EXIT); - - String output = terminal.getOutput(); + public void testVersion() throws Exception { + String output = execute("-V"); assertTrue(output, output.contains(Version.CURRENT.toString())); assertTrue(output, output.contains(Build.CURRENT.shortHash())); assertTrue(output, output.contains(Build.CURRENT.date())); assertTrue(output, output.contains(JvmInfo.jvmInfo().version())); + assertShouldRun(false); - terminal.resetOutput(); - parser = new BootstrapCLIParser(terminal); - status = parser.execute(args("start --version")); - assertStatus(status, OK_AND_EXIT); - - output = terminal.getOutput(); + terminal.reset(); + output = execute("--version"); assertTrue(output, output.contains(Version.CURRENT.toString())); assertTrue(output, output.contains(Build.CURRENT.shortHash())); assertTrue(output, output.contains(Build.CURRENT.date())); assertTrue(output, output.contains(JvmInfo.jvmInfo().version())); + assertShouldRun(false); } - public void testThatPidFileCanBeConfigured() throws Exception { - BootstrapCLIParser parser = new BootstrapCLIParser(terminal); + public void testPidfile() throws Exception { registerProperties("es.pidfile"); - ExitStatus status = parser.execute(args("start --pidfile")); // missing pid file - assertStatus(status, USAGE); + // missing argument + OptionException e = expectThrows(OptionException.class, () -> { + execute("-p"); + }); + assertEquals("Option p/pidfile requires an argument", e.getMessage()); + assertShouldRun(false); // good cases - status = parser.execute(args("start --pidfile /tmp/pid")); - assertStatus(status, OK); + terminal.reset(); + execute("--pidfile", "/tmp/pid"); assertSystemProperty("es.pidfile", "/tmp/pid"); + assertShouldRun(true); System.clearProperty("es.pidfile"); - status = parser.execute(args("start -p /tmp/pid")); - assertStatus(status, OK); + terminal.reset(); + execute("-p", "/tmp/pid"); assertSystemProperty("es.pidfile", "/tmp/pid"); + assertShouldRun(true); } - public void testThatParsingDaemonizeWorks() throws Exception { - BootstrapCLIParser parser = new BootstrapCLIParser(terminal); + public void testNoDaemonize() throws Exception { registerProperties("es.foreground"); - ExitStatus status = parser.execute(args("start -d")); - assertStatus(status, OK); - assertThat(System.getProperty("es.foreground"), is("false")); + execute(); + assertSystemProperty("es.foreground", null); + assertShouldRun(true); } - public void testThatNotDaemonizingDoesNotConfigureProperties() throws Exception { - BootstrapCLIParser parser = new BootstrapCLIParser(terminal); + public void testDaemonize() throws Exception { registerProperties("es.foreground"); - ExitStatus status = parser.execute(args("start")); - assertStatus(status, OK); - assertThat(System.getProperty("es.foreground"), is(nullValue())); + execute("-d"); + assertSystemProperty("es.foreground", "false"); + assertShouldRun(true); + + System.clearProperty("es.foreground"); + execute("--daemonize"); + assertSystemProperty("es.foreground", "false"); + assertShouldRun(true); } - public void testThatJavaPropertyStyleArgumentsCanBeParsed() throws Exception { - BootstrapCLIParser parser = new BootstrapCLIParser(terminal); + public void testConfig() throws Exception { 
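// Documents the -D handling exercised below: a plain -Dfoo=bar argument is stored as the
// es.-prefixed system property es.foo, matching the names passed to registerProperties(...).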
registerProperties("es.foo", "es.spam"); - ExitStatus status = parser.execute(args("start -Dfoo=bar -Dspam=eggs")); - assertStatus(status, OK); + execute("-Dfoo=bar", "-Dspam=eggs"); assertSystemProperty("es.foo", "bar"); assertSystemProperty("es.spam", "eggs"); + assertShouldRun(true); } - public void testThatJavaPropertyStyleArgumentsWithEsPrefixAreNotPrefixedTwice() throws Exception { - BootstrapCLIParser parser = new BootstrapCLIParser(terminal); - registerProperties("es.spam", "es.pidfile"); - - ExitStatus status = parser.execute(args("start -Des.pidfile=/path/to/foo/elasticsearch/distribution/zip/target/integ-tests/es.pid -Dspam=eggs")); - assertStatus(status, OK); - assertThat(System.getProperty("es.es.pidfile"), is(nullValue())); - assertSystemProperty("es.pidfile", "/path/to/foo/elasticsearch/distribution/zip/target/integ-tests/es.pid"); - assertSystemProperty("es.spam", "eggs"); - } - - public void testThatUnknownLongOptionsCanBeParsed() throws Exception { - BootstrapCLIParser parser = new BootstrapCLIParser(terminal); - registerProperties("es.network.host", "es.my.option"); - - ExitStatus status = parser.execute(args("start --network.host 127.0.0.1 --my.option=true")); - assertStatus(status, OK); - assertSystemProperty("es.network.host", "127.0.0.1"); - assertSystemProperty("es.my.option", "true"); - } - - public void testThatUnknownLongOptionsNeedAValue() throws Exception { - BootstrapCLIParser parser = new BootstrapCLIParser(terminal); - registerProperties("es.network.host"); - - ExitStatus status = parser.execute(args("start --network.host")); - assertStatus(status, USAGE); - String output = terminal.getOutput(); - assertTrue(output, output.contains("Parameter [network.host] needs value")); - - terminal.resetOutput(); - status = parser.execute(args("start --network.host --foo")); - assertStatus(status, USAGE); - output = terminal.getOutput(); - assertTrue(output, output.contains("Parameter [network.host] needs value")); - } - - public void testParsingErrors() throws Exception { - BootstrapCLIParser parser = new BootstrapCLIParser(terminal); - - // unknown params - ExitStatus status = parser.execute(args("version --unknown-param /tmp/pid")); - assertStatus(status, USAGE); - String output = terminal.getOutput(); - assertTrue(output, output.contains("Unrecognized option: --unknown-param")); - - // single dash in extra params - terminal.resetOutput(); - parser = new BootstrapCLIParser(terminal); - status = parser.execute(args("start -network.host 127.0.0.1")); - assertStatus(status, USAGE); - output = terminal.getOutput(); - assertTrue(output, output.contains("Parameter [-network.host]does not start with --")); - - // never ended parameter - terminal = new MockTerminal(); - parser = new BootstrapCLIParser(terminal); - status = parser.execute(args("start --network.host")); - assertStatus(status, USAGE); - output = terminal.getOutput(); - assertTrue(output, output.contains("Parameter [network.host] needs value")); - - // free floating value - terminal = new MockTerminal(); - parser = new BootstrapCLIParser(terminal); - status = parser.execute(args("start 127.0.0.1")); - assertStatus(status, USAGE); - output = terminal.getOutput(); - assertTrue(output, output.contains("Parameter [127.0.0.1]does not start with --")); - } - - public void testHelpWorks() throws Exception { - List> tuples = new ArrayList<>(); - tuples.add(new Tuple<>("version --help", "elasticsearch-version.help")); - tuples.add(new Tuple<>("version -h", "elasticsearch-version.help")); - tuples.add(new Tuple<>("start 
--help", "elasticsearch-start.help")); - tuples.add(new Tuple<>("start -h", "elasticsearch-start.help")); - tuples.add(new Tuple<>("--help", "elasticsearch.help")); - tuples.add(new Tuple<>("-h", "elasticsearch.help")); - - for (Tuple tuple : tuples) { - terminal.resetOutput(); - BootstrapCLIParser parser = new BootstrapCLIParser(terminal); - ExitStatus status = parser.execute(args(tuple.v1())); - assertStatus(status, OK_AND_EXIT); - assertTerminalOutputContainsHelpFile(terminal, "/org/elasticsearch/bootstrap/" + tuple.v2()); - } - } - - public void testThatSpacesInParametersAreSupported() throws Exception { - // emulates: bin/elasticsearch --node.name "'my node with spaces'" --pidfile "'/tmp/my pid.pid'" - BootstrapCLIParser parser = new BootstrapCLIParser(terminal); - registerProperties("es.pidfile", "es.my.param"); - - ExitStatus status = parser.execute("start", "--pidfile", "foo with space", "--my.param", "my awesome neighbour"); - assertStatus(status, OK); - assertSystemProperty("es.pidfile", "foo with space"); - assertSystemProperty("es.my.param", "my awesome neighbour"); - - } - - public void testThatHelpfulErrorMessageIsGivenWhenParametersAreOutOfOrder() throws Exception { - BootstrapCLIParser parser = new BootstrapCLIParser(terminal); + public void testConfigMalformed() throws Exception { UserError e = expectThrows(UserError.class, () -> { - parser.parse("start", new String[]{"--foo=bar", "-Dbaz=qux"}); + execute("-Dfoo"); }); - assertThat(e.getMessage(), containsString("must be before any parameters starting with --")); - assertNull(System.getProperty("es.foo")); + assertTrue(e.getMessage(), e.getMessage().contains("Malformed elasticsearch setting")); + } + + public void testUnknownOption() throws Exception { + OptionException e = expectThrows(OptionException.class, () -> { + execute("--network.host"); + }); + assertTrue(e.getMessage(), e.getMessage().contains("network.host is not a recognized option")); } private void registerProperties(String ... systemProperties) { @@ -266,8 +161,4 @@ public class BootstrapCliParserTests extends CliToolTestCase { String msg = String.format(Locale.ROOT, "Expected property %s to be %s, terminal output was %s", name, expectedValue, terminal.getOutput()); assertThat(msg, System.getProperty(name), is(expectedValue)); } - - private void assertStatus(ExitStatus status, ExitStatus expectedStatus) throws Exception { - assertThat(String.format(Locale.ROOT, "Expected status to be [%s], but was [%s], terminal output was %s", expectedStatus, status, terminal.getOutput()), status, is(expectedStatus)); - } } diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/cli/CheckFileCommandTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/cli/CheckFileCommandTests.java deleted file mode 100644 index 485886b5cf4..00000000000 --- a/qa/evil-tests/src/test/java/org/elasticsearch/common/cli/CheckFileCommandTests.java +++ /dev/null @@ -1,329 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.cli; - -import com.google.common.jimfs.Configuration; -import com.google.common.jimfs.Jimfs; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.set.Sets; -import org.elasticsearch.env.Environment; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.nio.file.FileSystem; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.attribute.GroupPrincipal; -import java.nio.file.attribute.PosixFileAttributeView; -import java.nio.file.attribute.PosixFileAttributes; -import java.nio.file.attribute.PosixFilePermission; -import java.nio.file.attribute.UserPrincipal; -import java.util.Set; - -import static org.hamcrest.Matchers.allOf; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.hasItem; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.is; - -/** - * - */ -public class CheckFileCommandTests extends ESTestCase { - - private MockTerminal captureOutputTerminal = new MockTerminal(); - - private Configuration jimFsConfiguration = Configuration.unix().toBuilder().setAttributeViews("basic", "owner", "posix", "unix").build(); - private Configuration jimFsConfigurationWithoutPermissions = randomBoolean() ? 
Configuration.unix().toBuilder().setAttributeViews("basic").build() : Configuration.windows(); - - private enum Mode { - CHANGE, KEEP, DISABLED - } - - public void testThatCommandLogsErrorMessageOnFail() throws Exception { - executeCommand(jimFsConfiguration, new PermissionCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.CHANGE)); - assertThat(captureOutputTerminal.getOutput(), containsString("Please ensure that the user account running Elasticsearch has read access to this file")); - } - - public void testThatCommandLogsNothingWhenPermissionRemains() throws Exception { - executeCommand(jimFsConfiguration, new PermissionCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.KEEP)); - assertTrue(captureOutputTerminal.getOutput().isEmpty()); - } - - public void testThatCommandLogsNothingWhenDisabled() throws Exception { - executeCommand(jimFsConfiguration, new PermissionCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.DISABLED)); - assertTrue(captureOutputTerminal.getOutput().isEmpty()); - } - - public void testThatCommandLogsNothingIfFilesystemDoesNotSupportPermissions() throws Exception { - executeCommand(jimFsConfigurationWithoutPermissions, new PermissionCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.DISABLED)); - assertTrue(captureOutputTerminal.getOutput().isEmpty()); - } - - public void testThatCommandLogsOwnerChange() throws Exception { - executeCommand(jimFsConfiguration, new OwnerCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.CHANGE)); - assertThat(captureOutputTerminal.getOutput(), allOf(containsString("Owner of file ["), containsString("] used to be ["), containsString("], but now is ["))); - } - - public void testThatCommandLogsNothingIfOwnerRemainsSame() throws Exception { - executeCommand(jimFsConfiguration, new OwnerCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.KEEP)); - assertTrue(captureOutputTerminal.getOutput().isEmpty()); - } - - public void testThatCommandLogsNothingIfOwnerIsDisabled() throws Exception { - executeCommand(jimFsConfiguration, new OwnerCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.DISABLED)); - assertTrue(captureOutputTerminal.getOutput().isEmpty()); - } - - public void testThatCommandLogsNothingIfFileSystemDoesNotSupportOwners() throws Exception { - executeCommand(jimFsConfigurationWithoutPermissions, new OwnerCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.DISABLED)); - assertTrue(captureOutputTerminal.getOutput().isEmpty()); - } - - public void testThatCommandLogsIfGroupChanges() throws Exception { - executeCommand(jimFsConfiguration, new GroupCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.CHANGE)); - assertThat(captureOutputTerminal.getOutput(), allOf(containsString("Group of file ["), containsString("] used to be ["), containsString("], but now is ["))); - } - - public void testThatCommandLogsNothingIfGroupRemainsSame() throws Exception { - executeCommand(jimFsConfiguration, new GroupCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.KEEP)); - assertTrue(captureOutputTerminal.getOutput().isEmpty()); - } - - public void testThatCommandLogsNothingIfGroupIsDisabled() throws Exception { - executeCommand(jimFsConfiguration, new GroupCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.DISABLED)); - assertTrue(captureOutputTerminal.getOutput().isEmpty()); - } - - public void testThatCommandLogsNothingIfFileSystemDoesNotSupportGroups() throws Exception { - executeCommand(jimFsConfigurationWithoutPermissions, new 
GroupCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.DISABLED)); - assertTrue(captureOutputTerminal.getOutput().isEmpty()); - } - - public void testThatCommandDoesNotLogAnythingOnFileCreation() throws Exception { - Configuration configuration = randomBoolean() ? jimFsConfiguration : jimFsConfigurationWithoutPermissions; - - try (FileSystem fs = Jimfs.newFileSystem(configuration)) { - Path path = fs.getPath(randomAsciiOfLength(10)); - Settings settings = Settings.builder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .build(); - new CreateFileCommand(captureOutputTerminal, path).execute(settings, new Environment(settings)); - assertThat(Files.exists(path), is(true)); - } - - assertTrue(captureOutputTerminal.getOutput().isEmpty()); - } - - public void testThatCommandWorksIfFileIsDeletedByCommand() throws Exception { - Configuration configuration = randomBoolean() ? jimFsConfiguration : jimFsConfigurationWithoutPermissions; - - try (FileSystem fs = Jimfs.newFileSystem(configuration)) { - Path path = fs.getPath(randomAsciiOfLength(10)); - Files.write(path, "anything".getBytes(StandardCharsets.UTF_8)); - - Settings settings = Settings.builder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .build(); - new DeleteFileCommand(captureOutputTerminal, path).execute(settings, new Environment(settings)); - assertThat(Files.exists(path), is(false)); - } - - assertTrue(captureOutputTerminal.getOutput().isEmpty()); - } - - private void executeCommand(Configuration configuration, AbstractTestCheckFileCommand command) throws Exception { - try (FileSystem fs = Jimfs.newFileSystem(configuration)) { - command.execute(fs); - } - } - - abstract class AbstractTestCheckFileCommand extends CheckFileCommand { - - protected final Mode mode; - protected FileSystem fs; - protected Path[] paths; - final Path baseDir; - - public AbstractTestCheckFileCommand(Path baseDir, Terminal terminal, Mode mode) throws IOException { - super(terminal); - this.mode = mode; - this.baseDir = baseDir; - } - - public CliTool.ExitStatus execute(FileSystem fs) throws Exception { - this.fs = fs; - this.paths = new Path[] { writePath(fs, "p1", "anything"), writePath(fs, "p2", "anything"), writePath(fs, "p3", "anything") }; - Settings settings = Settings.settingsBuilder() - .put(Environment.PATH_HOME_SETTING.getKey(), baseDir.toString()) - .build(); - return super.execute(Settings.EMPTY, new Environment(settings)); - } - - private Path writePath(FileSystem fs, String name, String content) throws IOException { - Path path = fs.getPath(name); - Files.write(path, content.getBytes(StandardCharsets.UTF_8)); - return path; - } - - @Override - protected Path[] pathsForPermissionsCheck(Settings settings, Environment env) { - return paths; - } - } - - /** - * command that changes permissions from a file if enabled - */ - class PermissionCheckFileCommand extends AbstractTestCheckFileCommand { - - public PermissionCheckFileCommand(Path baseDir, Terminal terminal, Mode mode) throws IOException { - super(baseDir, terminal, mode); - } - - @Override - public CliTool.ExitStatus doExecute(Settings settings, Environment env) throws Exception { - int randomInt = randomInt(paths.length - 1); - Path randomPath = paths[randomInt]; - switch (mode) { - case CHANGE: - Files.write(randomPath, randomAsciiOfLength(10).getBytes(StandardCharsets.UTF_8)); - Files.setPosixFilePermissions(randomPath, Sets.newHashSet(PosixFilePermission.OWNER_EXECUTE, PosixFilePermission.OTHERS_EXECUTE, 
PosixFilePermission.GROUP_EXECUTE)); - break; - case KEEP: - Files.write(randomPath, randomAsciiOfLength(10).getBytes(StandardCharsets.UTF_8)); - Set posixFilePermissions = Files.getPosixFilePermissions(randomPath); - Files.setPosixFilePermissions(randomPath, posixFilePermissions); - break; - } - return CliTool.ExitStatus.OK; - } - - } - - /** - * command that changes the owner of a file if enabled - */ - class OwnerCheckFileCommand extends AbstractTestCheckFileCommand { - - public OwnerCheckFileCommand(Path baseDir, Terminal terminal, Mode mode) throws IOException { - super(baseDir, terminal, mode); - } - - @Override - public CliTool.ExitStatus doExecute(Settings settings, Environment env) throws Exception { - int randomInt = randomInt(paths.length - 1); - Path randomPath = paths[randomInt]; - switch (mode) { - case CHANGE: - Files.write(randomPath, randomAsciiOfLength(10).getBytes(StandardCharsets.UTF_8)); - UserPrincipal randomOwner = fs.getUserPrincipalLookupService().lookupPrincipalByName(randomAsciiOfLength(10)); - Files.setOwner(randomPath, randomOwner); - break; - case KEEP: - Files.write(randomPath, randomAsciiOfLength(10).getBytes(StandardCharsets.UTF_8)); - UserPrincipal originalOwner = Files.getOwner(randomPath); - Files.setOwner(randomPath, originalOwner); - break; - } - - return CliTool.ExitStatus.OK; - } - } - - /** - * command that changes the group of a file if enabled - */ - class GroupCheckFileCommand extends AbstractTestCheckFileCommand { - - public GroupCheckFileCommand(Path baseDir, Terminal terminal, Mode mode) throws IOException { - super(baseDir, terminal, mode); - } - - @Override - public CliTool.ExitStatus doExecute(Settings settings, Environment env) throws Exception { - int randomInt = randomInt(paths.length - 1); - Path randomPath = paths[randomInt]; - switch (mode) { - case CHANGE: - Files.write(randomPath, randomAsciiOfLength(10).getBytes(StandardCharsets.UTF_8)); - GroupPrincipal randomPrincipal = fs.getUserPrincipalLookupService().lookupPrincipalByGroupName(randomAsciiOfLength(10)); - Files.getFileAttributeView(randomPath, PosixFileAttributeView.class).setGroup(randomPrincipal); - break; - case KEEP: - Files.write(randomPath, randomAsciiOfLength(10).getBytes(StandardCharsets.UTF_8)); - GroupPrincipal groupPrincipal = Files.readAttributes(randomPath, PosixFileAttributes.class).group(); - Files.getFileAttributeView(randomPath, PosixFileAttributeView.class).setGroup(groupPrincipal); - break; - } - - return CliTool.ExitStatus.OK; - } - } - - /** - * A command that creates a non existing file - */ - class CreateFileCommand extends CheckFileCommand { - - private final Path pathToCreate; - - public CreateFileCommand(Terminal terminal, Path pathToCreate) { - super(terminal); - this.pathToCreate = pathToCreate; - } - - @Override - public CliTool.ExitStatus doExecute(Settings settings, Environment env) throws Exception { - Files.write(pathToCreate, "anything".getBytes(StandardCharsets.UTF_8)); - return CliTool.ExitStatus.OK; - } - - @Override - protected Path[] pathsForPermissionsCheck(Settings settings, Environment env) throws Exception { - return new Path[] { pathToCreate }; - } - } - - /** - * A command that deletes an existing file - */ - class DeleteFileCommand extends CheckFileCommand { - - private final Path pathToDelete; - - public DeleteFileCommand(Terminal terminal, Path pathToDelete) { - super(terminal); - this.pathToDelete = pathToDelete; - } - - @Override - public CliTool.ExitStatus doExecute(Settings settings, Environment env) throws Exception { - 
Files.delete(pathToDelete); - return CliTool.ExitStatus.OK; - } - - @Override - protected Path[] pathsForPermissionsCheck(Settings settings, Environment env) throws Exception { - return new Path[] {pathToDelete}; - } - } -} diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/cli/CliToolTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/cli/CliToolTests.java deleted file mode 100644 index 144a12f141b..00000000000 --- a/qa/evil-tests/src/test/java/org/elasticsearch/common/cli/CliToolTests.java +++ /dev/null @@ -1,349 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.cli; - -import org.apache.commons.cli.CommandLine; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; -import org.elasticsearch.node.internal.InternalSettingsPreparer; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; - -import static java.util.Collections.unmodifiableMap; -import static org.elasticsearch.common.cli.CliTool.ExitStatus.OK; -import static org.elasticsearch.common.cli.CliTool.ExitStatus.USAGE; -import static org.elasticsearch.common.cli.CliToolConfig.Builder.cmd; -import static org.hamcrest.Matchers.arrayContaining; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.hasItem; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.is; - -@SuppressForbidden(reason = "modifies system properties intentionally") -public class CliToolTests extends CliToolTestCase { - public void testOK() throws Exception { - Terminal terminal = new MockTerminal(); - final AtomicReference executed = new AtomicReference<>(false); - final NamedCommand cmd = new NamedCommand("cmd", terminal) { - @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) { - executed.set(true); - return OK; - } - }; - SingleCmdTool tool = new SingleCmdTool("tool", terminal, cmd); - CliTool.ExitStatus status = tool.execute(); - assertStatus(status, OK); - assertCommandHasBeenExecuted(executed); - } - - public void testUsageError() throws Exception { - Terminal terminal = new MockTerminal(); - final AtomicReference executed = new AtomicReference<>(false); - final NamedCommand cmd = new NamedCommand("cmd", terminal) { - @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) throws UserError { - executed.set(true); - throw new UserError(CliTool.ExitStatus.USAGE, "bad usage"); - } - }; - SingleCmdTool tool = new 
SingleCmdTool("tool", terminal, cmd); - CliTool.ExitStatus status = tool.execute(); - assertStatus(status, CliTool.ExitStatus.USAGE); - assertCommandHasBeenExecuted(executed); - } - - public void testMultiCommand() throws Exception { - Terminal terminal = new MockTerminal(); - int count = randomIntBetween(2, 7); - List> executed = new ArrayList<>(count); - for (int i = 0; i < count; i++) { - executed.add(new AtomicReference<>(false)); - } - NamedCommand[] cmds = new NamedCommand[count]; - for (int i = 0; i < count; i++) { - final int index = i; - cmds[i] = new NamedCommand("cmd" + index, terminal) { - @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception { - executed.get(index).set(true); - return OK; - } - }; - } - MultiCmdTool tool = new MultiCmdTool("tool", terminal, cmds); - int cmdIndex = randomIntBetween(0, count-1); - CliTool.ExitStatus status = tool.execute("cmd" + cmdIndex); - assertThat(status, is(OK)); - for (int i = 0; i < count; i++) { - assertThat(executed.get(i).get(), is(i == cmdIndex)); - } - } - - public void testMultiCommandUnknownCommand() throws Exception { - Terminal terminal = new MockTerminal(); - int count = randomIntBetween(2, 7); - List> executed = new ArrayList<>(count); - for (int i = 0; i < count; i++) { - executed.add(new AtomicReference<>(false)); - } - NamedCommand[] cmds = new NamedCommand[count]; - for (int i = 0; i < count; i++) { - final int index = i; - cmds[i] = new NamedCommand("cmd" + index, terminal) { - @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception { - executed.get(index).set(true); - return OK; - } - }; - } - MultiCmdTool tool = new MultiCmdTool("tool", terminal, cmds); - CliTool.ExitStatus status = tool.execute("cmd" + count); // "cmd" + count doesn't exist - assertThat(status, is(CliTool.ExitStatus.USAGE)); - for (int i = 0; i < count; i++) { - assertThat(executed.get(i).get(), is(false)); - } - } - - public void testSingleCommandToolHelp() throws Exception { - MockTerminal terminal = new MockTerminal(); - final AtomicReference executed = new AtomicReference<>(false); - final NamedCommand cmd = new NamedCommand("cmd1", terminal) { - @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception { - executed.set(true); - throw new IOException("io error"); - } - }; - SingleCmdTool tool = new SingleCmdTool("tool", terminal, cmd); - CliTool.ExitStatus status = tool.execute(args("-h")); - assertStatus(status, CliTool.ExitStatus.OK_AND_EXIT); - assertThat(terminal.getOutput(), containsString("cmd1 help")); - } - - public void testMultiCommandToolHelp() throws Exception { - MockTerminal terminal = new MockTerminal(); - NamedCommand[] cmds = new NamedCommand[2]; - cmds[0] = new NamedCommand("cmd0", terminal) { - @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception { - return OK; - } - }; - cmds[1] = new NamedCommand("cmd1", terminal) { - @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception { - return OK; - } - }; - MultiCmdTool tool = new MultiCmdTool("tool", terminal, cmds); - CliTool.ExitStatus status = tool.execute(args("-h")); - assertStatus(status, CliTool.ExitStatus.OK_AND_EXIT); - assertThat(terminal.getOutput(), containsString("tool help")); - } - - public void testMultiCommandCmdHelp() throws Exception { - MockTerminal terminal = new MockTerminal(); - NamedCommand[] cmds = new NamedCommand[2]; - cmds[0] = new 
NamedCommand("cmd0", terminal) { - @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception { - return OK; - } - }; - cmds[1] = new NamedCommand("cmd1", terminal) { - @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception { - return OK; - } - }; - MultiCmdTool tool = new MultiCmdTool("tool", terminal, cmds); - CliTool.ExitStatus status = tool.execute(args("cmd1 -h")); - assertStatus(status, CliTool.ExitStatus.OK_AND_EXIT); - assertThat(terminal.getOutput(), containsString("cmd1 help")); - } - - public void testNonUserErrorPropagates() throws Exception { - MockTerminal terminal = new MockTerminal(); - NamedCommand cmd = new NamedCommand("cmd", terminal) { - @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception { - throw new IOException("error message"); - } - }; - SingleCmdTool tool = new SingleCmdTool("tool", terminal, cmd); - IOException e = expectThrows(IOException.class, () -> { - tool.execute(); - }); - assertEquals("error message", e.getMessage()); - } - - public void testMultipleLaunch() throws Exception { - Terminal terminal = new MockTerminal(); - final AtomicReference executed = new AtomicReference<>(false); - final NamedCommand cmd = new NamedCommand("cmd", terminal) { - @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) { - executed.set(true); - return OK; - } - }; - SingleCmdTool tool = new SingleCmdTool("tool", terminal, cmd); - tool.parse("cmd", Strings.splitStringByCommaToArray("--verbose")); - tool.parse("cmd", Strings.splitStringByCommaToArray("--silent")); - tool.parse("cmd", Strings.splitStringByCommaToArray("--help")); - } - - public void testPromptForSetting() throws Exception { - final AtomicReference promptedSecretValue = new AtomicReference<>(null); - final AtomicReference promptedTextValue = new AtomicReference<>(null); - final MockTerminal terminal = new MockTerminal(); - terminal.addTextInput("replaced"); - terminal.addSecretInput("changeit"); - final NamedCommand cmd = new NamedCommand("noop", terminal) { - @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) { - promptedSecretValue.set(settings.get("foo.password")); - promptedTextValue.set(settings.get("replace")); - return OK; - } - }; - - System.setProperty("es.foo.password", InternalSettingsPreparer.SECRET_PROMPT_VALUE); - System.setProperty("es.replace", InternalSettingsPreparer.TEXT_PROMPT_VALUE); - try { - new SingleCmdTool("tool", terminal, cmd).execute(); - } finally { - System.clearProperty("es.foo.password"); - System.clearProperty("es.replace"); - } - - assertThat(promptedSecretValue.get(), is("changeit")); - assertThat(promptedTextValue.get(), is("replaced")); - } - - public void testStopAtNonOptionParsing() throws Exception { - final CliToolConfig.Cmd lenientCommand = cmd("lenient", CliTool.Command.Exit.class).stopAtNonOption(true).build(); - final CliToolConfig.Cmd strictCommand = cmd("strict", CliTool.Command.Exit.class).stopAtNonOption(false).build(); - final CliToolConfig config = CliToolConfig.config("elasticsearch", CliTool.class).cmds(lenientCommand, strictCommand).build(); - - MockTerminal terminal = new MockTerminal(); - final CliTool cliTool = new CliTool(config, terminal) { - @Override - protected Command parse(String cmdName, CommandLine cli) throws Exception { - return new NamedCommand(cmdName, terminal) { - @Override - public ExitStatus execute(Settings settings, Environment env) throws Exception 
{ - return OK; - } - }; - } - }; - - // known parameters, no error - assertStatus(cliTool.execute(args("lenient --verbose")), OK); - assertStatus(cliTool.execute(args("lenient -v")), OK); - - // unknown parameters, no error - assertStatus(cliTool.execute(args("lenient --unknown")), OK); - assertStatus(cliTool.execute(args("lenient -u")), OK); - - // unknown parameters, error - assertStatus(cliTool.execute(args("strict --unknown")), USAGE); - assertThat(terminal.getOutput(), containsString("Unrecognized option: --unknown")); - - terminal.resetOutput(); - assertStatus(cliTool.execute(args("strict -u")), USAGE); - assertThat(terminal.getOutput(), containsString("Unrecognized option: -u")); - } - - private void assertStatus(CliTool.ExitStatus status, CliTool.ExitStatus expectedStatus) { - assertThat(status, is(expectedStatus)); - } - - private void assertCommandHasBeenExecuted(AtomicReference executed) { - assertThat("Expected command atomic reference counter to be set to true", executed.get(), is(Boolean.TRUE)); - } - - private static class SingleCmdTool extends CliTool { - - private final Command command; - - private SingleCmdTool(String name, Terminal terminal, NamedCommand command) { - super(CliToolConfig.config(name, SingleCmdTool.class) - .cmds(cmd(command.name, command.getClass())) - .build(), terminal); - this.command = command; - } - - @Override - protected Command parse(String cmdName, CommandLine cli) throws Exception { - return command; - } - } - - private static class MultiCmdTool extends CliTool { - - private final Map commands; - - private MultiCmdTool(String name, Terminal terminal, NamedCommand... commands) { - super(CliToolConfig.config(name, MultiCmdTool.class) - .cmds(cmds(commands)) - .build(), terminal); - Map commandByName = new HashMap<>(); - for (int i = 0; i < commands.length; i++) { - commandByName.put(commands[i].name, commands[i]); - } - this.commands = unmodifiableMap(commandByName); - } - - @Override - protected Command parse(String cmdName, CommandLine cli) throws Exception { - return commands.get(cmdName); - } - - private static CliToolConfig.Cmd[] cmds(NamedCommand... 
commands) { - CliToolConfig.Cmd[] cmds = new CliToolConfig.Cmd[commands.length]; - for (int i = 0; i < commands.length; i++) { - cmds[i] = cmd(commands[i].name, commands[i].getClass()).build(); - } - return cmds; - } - } - - private static abstract class NamedCommand extends CliTool.Command { - - private final String name; - - private NamedCommand(String name, Terminal terminal) { - super(terminal); - this.name = name; - } - } -} diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java index 86f02fe6f30..fb69c817f3a 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java @@ -44,11 +44,8 @@ import java.util.zip.ZipOutputStream; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.Version; -import org.elasticsearch.common.cli.CliTool; -import org.elasticsearch.common.cli.CliToolTestCase; -import org.elasticsearch.common.cli.MockTerminal; -import org.elasticsearch.common.cli.Terminal; -import org.elasticsearch.common.cli.UserError; +import org.elasticsearch.cli.MockTerminal; +import org.elasticsearch.cli.UserError; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.test.ESTestCase; @@ -119,8 +116,7 @@ public class InstallPluginCommandTests extends ESTestCase { static MockTerminal installPlugin(String pluginUrl, Environment env) throws Exception { MockTerminal terminal = new MockTerminal(); - CliTool.ExitStatus status = new InstallPluginCommand(terminal, pluginUrl, true).execute(env.settings(), env); - assertEquals(CliTool.ExitStatus.OK, status); + new InstallPluginCommand(env).execute(terminal, pluginUrl, true); return terminal; } @@ -470,6 +466,18 @@ public class InstallPluginCommandTests extends ESTestCase { assertInstallCleaned(env); } + public void testZipRelativeOutsideEntryName() throws Exception { + Path zip = createTempDir().resolve("broken.zip"); + try (ZipOutputStream stream = new ZipOutputStream(Files.newOutputStream(zip))) { + stream.putNextEntry(new ZipEntry("elasticsearch/../blah")); + } + String pluginZip = zip.toUri().toURL().toString(); + IOException e = expectThrows(IOException.class, () -> { + installPlugin(pluginZip, createEnv()); + }); + assertTrue(e.getMessage(), e.getMessage().contains("resolving outside of plugin directory")); + } + // TODO: test batch flag? // TODO: test checksum (need maven/official below) // TODO: test maven, official, and staging install...need tests with fixtures... 
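
The new `testZipRelativeOutsideEntryName` test above pins down zip-slip style path traversal: an entry named `elasticsearch/../blah` must not extract outside the plugin directory. A minimal sketch of that kind of check, using a hypothetical `resolveZipEntry` helper rather than the actual `InstallPluginCommand` internals:

```java
import java.io.IOException;
import java.nio.file.Path;
import java.util.zip.ZipEntry;

class ZipEntrySketch {
    // Hypothetical helper, not the real InstallPluginCommand code: resolve an
    // entry against the plugin directory and reject anything that normalizes
    // to a path outside of it.
    static Path resolveZipEntry(Path pluginDir, ZipEntry entry) throws IOException {
        Path target = pluginDir.resolve(entry.getName()).normalize();
        if (target.startsWith(pluginDir) == false) {
            throw new IOException("Zip contains entry name '" + entry.getName()
                + "' resolving outside of plugin directory");
        }
        return target;
    }
}
```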
diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java index c86a6464eb0..cbdd031dea1 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java @@ -22,14 +22,10 @@ package org.elasticsearch.plugins; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; -import java.util.Collections; -import java.util.List; import org.apache.lucene.util.LuceneTestCase; -import org.elasticsearch.common.cli.CliTool; -import org.elasticsearch.common.cli.CliToolTestCase; -import org.elasticsearch.common.cli.MockTerminal; -import org.elasticsearch.common.cli.Terminal; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.MockTerminal; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.test.ESTestCase; @@ -48,8 +44,9 @@ public class ListPluginsCommandTests extends ESTestCase { static MockTerminal listPlugins(Environment env) throws Exception { MockTerminal terminal = new MockTerminal(); - CliTool.ExitStatus status = new ListPluginsCommand(terminal).execute(env.settings(), env); - assertEquals(CliTool.ExitStatus.OK, status); + String[] args = {}; + int status = new ListPluginsCommand(env).main(args, terminal); + assertEquals(ExitCodes.OK, status); return terminal; } diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/PluginSecurityTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/PluginSecurityTests.java index acc300c6cf5..466f7d05cd1 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/PluginSecurityTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/PluginSecurityTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.plugins; -import org.elasticsearch.common.cli.Terminal; +import org.elasticsearch.cli.Terminal; import org.elasticsearch.test.ESTestCase; import java.nio.file.Path; diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java index 0bfdf5c34a8..d9d5661b834 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java @@ -25,11 +25,8 @@ import java.nio.file.Files; import java.nio.file.Path; import org.apache.lucene.util.LuceneTestCase; -import org.elasticsearch.common.cli.CliTool; -import org.elasticsearch.common.cli.CliToolTestCase; -import org.elasticsearch.common.cli.MockTerminal; -import org.elasticsearch.common.cli.Terminal; -import org.elasticsearch.common.cli.UserError; +import org.elasticsearch.cli.UserError; +import org.elasticsearch.cli.MockTerminal; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.test.ESTestCase; @@ -51,8 +48,7 @@ public class RemovePluginCommandTests extends ESTestCase { static MockTerminal removePlugin(String name, Environment env) throws Exception { MockTerminal terminal = new MockTerminal(); - CliTool.ExitStatus status = new RemovePluginCommand(terminal, name).execute(env.settings(), env); - assertEquals(CliTool.ExitStatus.OK, status); + new RemovePluginCommand(env).execute(terminal, name); return terminal; } diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.recovery/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.recovery/10_basic.yaml index 432b0e50ae4..820cf6dec4d 100755 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.recovery/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.recovery/10_basic.yaml @@ -35,14 +35,16 @@ [-\w./]+ \s+ # repository [-\w./]+ \s+ # snapshot \d+ \s+ # files + \d+ \s+ # files_recovered \d+\.\d+% \s+ # files_percent + \d+ \s+ # files_total \d+ \s+ # bytes + \d+ \s+ # bytes_recovered \d+\.\d+% \s+ # bytes_percent - \d+ \s+ # total_files - \d+ \s+ # total_bytes - \d+ \s+ # translog - -?\d+\.\d+% \s+ # translog_percent - -?\d+ # total_translog + \d+ \s+ # bytes_total + -?\d+ \s+ # translog_ops + \d+ \s+ # translog_ops_recovered + -?\d+\.\d+% # translog_ops_percent \n )+ $/ diff --git a/settings.gradle b/settings.gradle index f2518e69b12..b1bb374fff1 100644 --- a/settings.gradle +++ b/settings.gradle @@ -11,6 +11,7 @@ List projects = [ 'test:framework', 'test:fixtures:example-fixture', 'test:fixtures:hdfs-fixture', + 'test:logger-usage', 'modules:ingest-grok', 'modules:lang-expression', 'modules:lang-groovy', diff --git a/test/build.gradle b/test/build.gradle index 7e1b5725147..fcf4f5bb761 100644 --- a/test/build.gradle +++ b/test/build.gradle @@ -30,8 +30,9 @@ subprojects { // the main files are actually test files, so use the appropriate forbidden api sigs forbiddenApisMain { bundledSignatures = ['jdk-unsafe', 'jdk-deprecated'] - signaturesURLs = [PrecommitTasks.getResource('/forbidden/all-signatures.txt'), - PrecommitTasks.getResource('/forbidden/test-signatures.txt')] + signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt'), + PrecommitTasks.getResource('/forbidden/es-signatures.txt'), + PrecommitTasks.getResource('/forbidden/es-test-signatures.txt')] } // TODO: should we have licenses for our test deps? diff --git a/test/framework/src/main/java/org/elasticsearch/cli/CommandTestCase.java b/test/framework/src/main/java/org/elasticsearch/cli/CommandTestCase.java new file mode 100644 index 00000000000..e9c6a2eec9c --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/cli/CommandTestCase.java @@ -0,0 +1,56 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cli; + +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; + +/** + * A base test case for cli tools. + */ +public abstract class CommandTestCase extends ESTestCase { + + /** The terminal that execute uses. */ + protected final MockTerminal terminal = new MockTerminal(); + + /** The last command that was executed. 
*/ + protected Command command; + + @Before + public void resetTerminal() { + terminal.reset(); + terminal.setVerbosity(Terminal.Verbosity.NORMAL); + } + + /** Creates a Command to test execution. */ + protected abstract Command newCommand(); + + /** + * Runs the command with the given args. + * + * Output can be found in {@link #terminal}. + * The command created can be found in {@link #command}. + */ + public String execute(String... args) throws Exception { + command = newCommand(); + command.mainWithoutErrorHandling(args, terminal); + return terminal.getOutput(); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/common/cli/MockTerminal.java b/test/framework/src/main/java/org/elasticsearch/cli/MockTerminal.java similarity index 87% rename from test/framework/src/main/java/org/elasticsearch/common/cli/MockTerminal.java rename to test/framework/src/main/java/org/elasticsearch/cli/MockTerminal.java index 3b2903b3fab..bd8bd493cea 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/cli/MockTerminal.java +++ b/test/framework/src/main/java/org/elasticsearch/cli/MockTerminal.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.common.cli; +package org.elasticsearch.cli; import java.io.ByteArrayOutputStream; import java.io.OutputStreamWriter; @@ -45,7 +45,7 @@ public class MockTerminal extends Terminal { @Override public String readText(String prompt) { if (textInput.isEmpty()) { - return null; + throw new IllegalStateException("No text input configured for prompt [" + prompt + "]"); } return textInput.removeFirst(); } @@ -53,7 +53,7 @@ public class MockTerminal extends Terminal { @Override public char[] readSecret(String prompt) { if (secretInput.isEmpty()) { - return null; + throw new IllegalStateException("No secret input configured for prompt [" + prompt + "]"); } return secretInput.removeFirst().toCharArray(); } @@ -78,8 +78,10 @@ public class MockTerminal extends Terminal { return buffer.toString("UTF-8"); } - /** Wipes the output. */ - public void resetOutput() { + /** Wipes the input and output. */ + public void reset() { buffer.reset(); + textInput.clear(); + secretInput.clear(); } } diff --git a/test/framework/src/main/java/org/elasticsearch/common/cli/CliToolTestCase.java b/test/framework/src/main/java/org/elasticsearch/common/cli/CliToolTestCase.java index 06c197a05da..576ecf2d1ee 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/cli/CliToolTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/common/cli/CliToolTestCase.java @@ -21,6 +21,7 @@ package org.elasticsearch.common.cli; import java.io.IOException; +import org.elasticsearch.cli.MockTerminal; import org.elasticsearch.common.Strings; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.test.ESTestCase; diff --git a/test/framework/src/main/java/org/elasticsearch/test/CorruptionUtils.java b/test/framework/src/main/java/org/elasticsearch/test/CorruptionUtils.java index 15b72c4cccd..916adc142c8 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/CorruptionUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/CorruptionUtils.java @@ -88,11 +88,11 @@ public final class CorruptionUtils { // we need to add assumptions here that the checksums actually really don't match there is a small chance to get collisions // in the checksum which is ok though.... 
         StringBuilder msg = new StringBuilder();
-        msg.append("Checksum before: [").append(checksumBeforeCorruption).append("]");
-        msg.append(" after: [").append(checksumAfterCorruption).append("]");
-        msg.append(" checksum value after corruption: ").append(actualChecksumAfterCorruption).append("]");
-        msg.append(" file: ").append(fileToCorrupt.getFileName()).append(" length: ").append(dir.fileLength(fileToCorrupt.getFileName().toString()));
-        logger.info(msg.toString());
+        msg.append("before: [").append(checksumBeforeCorruption).append("] ");
+        msg.append("after: [").append(checksumAfterCorruption).append("] ");
+        msg.append("checksum value after corruption: ").append(actualChecksumAfterCorruption).append("] ");
+        msg.append("file: ").append(fileToCorrupt.getFileName()).append(" length: ").append(dir.fileLength(fileToCorrupt.getFileName().toString()));
+        logger.info("Checksum {}", msg);
         assumeTrue("Checksum collision - " + msg.toString(), checksumAfterCorruption != checksumBeforeCorruption // collision
                 || actualChecksumAfterCorruption != checksumBeforeCorruption); // checksum corrupted
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
index 0098c4ce9c8..235ccec4ee0 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
@@ -78,6 +78,7 @@ import org.elasticsearch.common.network.NetworkAddress;
 import org.elasticsearch.common.network.NetworkModule;
 import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.settings.SettingsModule;
 import org.elasticsearch.common.transport.InetSocketTransportAddress;
@@ -198,7 +199,7 @@ import static org.hamcrest.Matchers.startsWith;
  * should be used, here is an example:
  *
  *
  * {@literal @}ClusterScope(scope=Scope.TEST) public class SomeIT extends ESIntegTestCase {
  * public void testMethod() {}
  * }
  * 
@@ -209,7 +210,7 @@ import static org.hamcrest.Matchers.startsWith;
  * determined at random and can change across tests. The {@link ClusterScope} allows configuring the initial number of nodes
  * that are created before the tests start.
  *
  * {@literal @}ClusterScope(scope=Scope.SUITE, numDataNodes=3)
  * public class SomeIT extends ESIntegTestCase {
  * public void testMethod() {}
  * }
@@ -270,7 +271,8 @@ public abstract class ESIntegTestCase extends ESTestCase {
      * The value of this seed can be used to initialize a random context for a specific index.
      * It's set once per test via a generic index template.
      */
-    public static final Setting<Long> INDEX_TEST_SEED_SETTING = Setting.longSetting("index.tests.seed", 0, Long.MIN_VALUE, false, Setting.Scope.INDEX);
+    public static final Setting<Long> INDEX_TEST_SEED_SETTING =
+        Setting.longSetting("index.tests.seed", 0, Long.MIN_VALUE, Property.IndexScope);
 
     /**
      * A boolean value to enable or disable mock modules. This is useful to test the
@@ -883,7 +885,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
                 sb.append("\n-> _index: [").append(hit.getIndex()).append("] type [").append(hit.getType())
                         .append("] id [").append(hit.id()).append("]");
             }
-            logger.warn(sb.toString());
+            logger.warn("{}", sb);
             fail(failMsg);
         }
     }
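
The `Setting` hunks here and in the files below are one mechanical migration: the trailing `dynamic` boolean and the `Setting.Scope` enum collapse into varargs `Setting.Property` values. A before/after sketch of the seed setting from the hunk above (a dynamic setting would additionally pass `Property.Dynamic`):

```java
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;

class SettingMigrationSketch {
    // Before: Setting.longSetting("index.tests.seed", 0, Long.MIN_VALUE, false, Setting.Scope.INDEX)
    // After: the scope (and, where applicable, dynamic-ness) is a Property vararg.
    static final Setting<Long> INDEX_TEST_SEED_SETTING =
        Setting.longSetting("index.tests.seed", 0, Long.MIN_VALUE, Property.IndexScope);
}
```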
diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalSettingsPlugin.java b/test/framework/src/main/java/org/elasticsearch/test/InternalSettingsPlugin.java
index d8a8a9304a1..b57afd5df2d 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/InternalSettingsPlugin.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/InternalSettingsPlugin.java
@@ -20,6 +20,7 @@ package org.elasticsearch.test;
 
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.SettingsModule;
 import org.elasticsearch.plugins.Plugin;
 
@@ -34,9 +35,12 @@ public final class InternalSettingsPlugin extends Plugin {
         return "a plugin that allows to set values for internal settings which are can't be set via the ordinary API without this plugin installed";
     }
 
-    public static final Setting<Integer> VERSION_CREATED = Setting.intSetting("index.version.created", 0, false, Setting.Scope.INDEX);
-    public static final Setting<Boolean> MERGE_ENABLED = Setting.boolSetting("index.merge.enabled", true, false, Setting.Scope.INDEX);
-    public static final Setting<Long> INDEX_CREATION_DATE_SETTING = Setting.longSetting(IndexMetaData.SETTING_CREATION_DATE, -1, -1, false, Setting.Scope.INDEX);
+    public static final Setting<Integer> VERSION_CREATED =
+        Setting.intSetting("index.version.created", 0, Property.IndexScope);
+    public static final Setting<Boolean> MERGE_ENABLED =
+        Setting.boolSetting("index.merge.enabled", true, Property.IndexScope);
+    public static final Setting<Long> INDEX_CREATION_DATE_SETTING =
+        Setting.longSetting(IndexMetaData.SETTING_CREATION_DATE, -1, -1, Property.IndexScope);
 
     public void onModule(SettingsModule module) {
         module.registerSetting(VERSION_CREATED);
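
For context, a test consumes these internal settings by installing the plugin and passing the setting at index-creation time. A hedged sketch, where the index name and value are illustrative and `client()` is the usual `ESIntegTestCase` helper:

```java
// Illustrative only: disable background merges for a test index, assuming
// InternalSettingsPlugin is installed via the test's nodePlugins().
client().admin().indices().prepareCreate("test")
    .setSettings(Settings.builder()
        .put(InternalSettingsPlugin.MERGE_ENABLED.getKey(), false)
        .build())
    .get();
```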
diff --git a/test/framework/src/main/java/org/elasticsearch/test/MockIndexEventListener.java b/test/framework/src/main/java/org/elasticsearch/test/MockIndexEventListener.java
index 13f533a583e..f17fe024f14 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/MockIndexEventListener.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/MockIndexEventListener.java
@@ -22,6 +22,7 @@ import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.inject.Module;
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.settings.SettingsModule;
 import org.elasticsearch.index.Index;
@@ -63,7 +64,7 @@ public final class MockIndexEventListener {
         /**
          * For tests to pass in to fail on listener invocation
          */
-        public static final Setting<Boolean> INDEX_FAIL = Setting.boolSetting("index.fail", false, false, Setting.Scope.INDEX);
+        public static final Setting<Boolean> INDEX_FAIL = Setting.boolSetting("index.fail", false, Property.IndexScope);
         public void onModule(SettingsModule module) {
             module.registerSetting(INDEX_FAIL);
         }
diff --git a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java
index ddccfe88e38..bf32b6b8575 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java
@@ -31,6 +31,7 @@ import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.engine.EngineConfig;
@@ -55,11 +56,13 @@ public final class MockEngineSupport {
      * Allows tests to wrap an index reader randomly with a given ratio. This is disabled by default ie. 0.0d since reader wrapping is insanely
      * slow if {@link org.apache.lucene.index.AssertingDirectoryReader} is used.
      */
-    public static final Setting<Double> WRAP_READER_RATIO = Setting.doubleSetting("index.engine.mock.random.wrap_reader_ratio", 0.0d, 0.0d, false, Setting.Scope.INDEX);
+    public static final Setting<Double> WRAP_READER_RATIO =
+        Setting.doubleSetting("index.engine.mock.random.wrap_reader_ratio", 0.0d, 0.0d, Property.IndexScope);
     /**
      * Allows tests to prevent an engine from being flushed on close ie. to test translog recovery...
      */
-    public static final Setting<Boolean> DISABLE_FLUSH_ON_CLOSE = Setting.boolSetting("index.mock.disable_flush_on_close", false, false, Setting.Scope.INDEX);
+    public static final Setting<Boolean> DISABLE_FLUSH_ON_CLOSE =
+        Setting.boolSetting("index.mock.disable_flush_on_close", false, Property.IndexScope);
 
 
     private final AtomicBoolean closing = new AtomicBoolean(false);
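
Declaration style aside, these constants are read as before; a sketch of consuming one, assuming the caller has the index's `Settings` at hand:

```java
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.engine.MockEngineSupport;

class WrapReaderRatioSketch {
    // Illustrative: a Property-based Setting resolves its value from Settings
    // exactly like the old Scope-based declaration did.
    static double wrapReaderRatio(Settings indexSettings) {
        return MockEngineSupport.WRAP_READER_RATIO.get(indexSettings);
    }
}
```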
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestClient.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestClient.java
index e5597713570..e798fd8c8ab 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestClient.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestClient.java
@@ -151,8 +151,7 @@ public class RestClient implements Closeable {
 
         HttpRequestBuilder httpRequestBuilder = callApiBuilder(apiName, requestParams, body);
         for (Map.Entry<String, String> header : headers.entrySet()) {
-            logger.error("Adding header " + header.getKey());
-            logger.error(" with value " + header.getValue());
+            logger.error("Adding header {}\n with value {}", header.getKey(), header.getValue());
             httpRequestBuilder.addHeader(header.getKey(), header.getValue());
         }
         logger.debug("calling api [{}]", apiName);
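
Hunks like this one replace eager string concatenation in log calls with `{}` placeholders, the pattern the new logger-usage precommit check enforces: the message is only formatted when the statement actually logs. In sketch form, with names mirroring the loop above:

```java
import java.util.Map;
import org.elasticsearch.common.logging.ESLogger;

class LoggerUsageSketch {
    static void logHeader(ESLogger logger, Map.Entry<String, String> header) {
        // Discouraged: concatenation builds the message even when ERROR is disabled:
        //   logger.error("Adding header " + header.getKey() + " with value " + header.getValue());
        // Preferred: placeholders defer formatting until the message is emitted.
        logger.error("Adding header {}\n with value {}", header.getKey(), header.getValue());
    }
}
```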
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpResponse.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpResponse.java
index 9945edbefa9..37fc163ac61 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpResponse.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpResponse.java
@@ -61,7 +61,7 @@ public class HttpResponse {
                 try {
                     httpResponse.close();
                 } catch (IOException e) {
-                    logger.error(e.getMessage(), e);
+                    logger.error("Failed closing response", e);
                 }
             }
         } else {
diff --git a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java
index c945a308363..c50c1ed5446 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java
@@ -23,7 +23,6 @@ import com.carrotsearch.randomizedtesting.SeedUtils;
 import com.carrotsearch.randomizedtesting.generators.RandomPicks;
 
 import org.apache.lucene.index.CheckIndex;
-import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.store.BaseDirectoryWrapper;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.LockFactory;
@@ -32,13 +31,13 @@ import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.store.StoreRateLimiting;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestRuleMarkFailure;
-import org.elasticsearch.cluster.metadata.AliasOrIndex;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.IndexModule;
 import org.elasticsearch.index.IndexSettings;
@@ -57,17 +56,20 @@ import java.io.PrintStream;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Path;
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.Random;
-import java.util.Set;
 
 public class MockFSDirectoryService extends FsDirectoryService {
 
-    public static final Setting<Double> RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING = Setting.doubleSetting("index.store.mock.random.io_exception_rate_on_open", 0.0d,  0.0d, false, Setting.Scope.INDEX);
-    public static final Setting<Double> RANDOM_IO_EXCEPTION_RATE_SETTING = Setting.doubleSetting("index.store.mock.random.io_exception_rate", 0.0d,  0.0d, false, Setting.Scope.INDEX);
-    public static final Setting<Boolean> RANDOM_PREVENT_DOUBLE_WRITE_SETTING = Setting.boolSetting("index.store.mock.random.prevent_double_write", true, false, Setting.Scope.INDEX);// true is default in MDW
-    public static final Setting<Boolean> RANDOM_NO_DELETE_OPEN_FILE_SETTING = Setting.boolSetting("index.store.mock.random.no_delete_open_file", true, false, Setting.Scope.INDEX);// true is default in MDW
-    public static final Setting<Boolean> CRASH_INDEX_SETTING = Setting.boolSetting("index.store.mock.random.crash_index", true, false, Setting.Scope.INDEX);// true is default in MDW
+    public static final Setting<Double> RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING =
+        Setting.doubleSetting("index.store.mock.random.io_exception_rate_on_open", 0.0d,  0.0d, Property.IndexScope);
+    public static final Setting<Double> RANDOM_IO_EXCEPTION_RATE_SETTING =
+        Setting.doubleSetting("index.store.mock.random.io_exception_rate", 0.0d,  0.0d, Property.IndexScope);
+    public static final Setting<Boolean> RANDOM_PREVENT_DOUBLE_WRITE_SETTING =
+        Setting.boolSetting("index.store.mock.random.prevent_double_write", true, Property.IndexScope);// true is default in MDW
+    public static final Setting<Boolean> RANDOM_NO_DELETE_OPEN_FILE_SETTING =
+        Setting.boolSetting("index.store.mock.random.no_delete_open_file", true, Property.IndexScope);// true is default in MDW
+    public static final Setting<Boolean> CRASH_INDEX_SETTING =
+        Setting.boolSetting("index.store.mock.random.crash_index", true, Property.IndexScope);// true is default in MDW
 
     private final FsDirectoryService delegateService;
     private final Random random;
diff --git a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java
index 80251d54951..44e3ad598eb 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java
@@ -23,6 +23,7 @@ import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.settings.SettingsModule;
 import org.elasticsearch.index.IndexModule;
@@ -44,7 +45,8 @@ import java.util.Map;
 
 public class MockFSIndexStore extends IndexStore {
 
-    public static final Setting<Boolean> INDEX_CHECK_INDEX_ON_CLOSE_SETTING = Setting.boolSetting("index.store.mock.check_index_on_close", true, false, Setting.Scope.INDEX);
+    public static final Setting<Boolean> INDEX_CHECK_INDEX_ON_CLOSE_SETTING =
+        Setting.boolSetting("index.store.mock.check_index_on_close", true, Property.IndexScope);
 
     public static class TestPlugin extends Plugin {
         @Override
diff --git a/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java b/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java
index 4c48f990d6a..fc090e151a3 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java
@@ -20,6 +20,7 @@
 package org.elasticsearch.test.tasks;
 
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.tasks.TaskManager;
@@ -33,7 +34,8 @@ import java.util.concurrent.CopyOnWriteArrayList;
  */
 public class MockTaskManager extends TaskManager {
 
-    public static final Setting<Boolean> USE_MOCK_TASK_MANAGER_SETTING = Setting.boolSetting("tests.mock.taskmanager.enabled", false, false, Setting.Scope.CLUSTER);
+    public static final Setting<Boolean> USE_MOCK_TASK_MANAGER_SETTING =
+        Setting.boolSetting("tests.mock.taskmanager.enabled", false, Property.NodeScope);
 
     private final Collection<MockTaskManagerListener> listeners = new CopyOnWriteArrayList<>();
 
diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/AssertingLocalTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/AssertingLocalTransport.java
index fb310239155..322882a7b3c 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/transport/AssertingLocalTransport.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/transport/AssertingLocalTransport.java
@@ -25,6 +25,7 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.network.NetworkModule;
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.settings.SettingsModule;
 import org.elasticsearch.plugins.Plugin;
@@ -67,10 +68,12 @@ public class AssertingLocalTransport extends LocalTransport {
         }
     }
 
-    public static final Setting<Version> ASSERTING_TRANSPORT_MIN_VERSION_KEY = new Setting<>("transport.asserting.version.min",
-            Integer.toString(Version.CURRENT.minimumCompatibilityVersion().id), (s) -> Version.fromId(Integer.parseInt(s)), false, Setting.Scope.CLUSTER);
-    public static final Setting<Version> ASSERTING_TRANSPORT_MAX_VERSION_KEY = new Setting<>("transport.asserting.version.max",
-        Integer.toString(Version.CURRENT.id), (s) -> Version.fromId(Integer.parseInt(s)), false, Setting.Scope.CLUSTER);
+    public static final Setting<Version> ASSERTING_TRANSPORT_MIN_VERSION_KEY =
+        new Setting<>("transport.asserting.version.min", Integer.toString(Version.CURRENT.minimumCompatibilityVersion().id),
+            (s) -> Version.fromId(Integer.parseInt(s)), Property.NodeScope);
+    public static final Setting<Version> ASSERTING_TRANSPORT_MAX_VERSION_KEY =
+        new Setting<>("transport.asserting.version.max", Integer.toString(Version.CURRENT.id),
+            (s) -> Version.fromId(Integer.parseInt(s)), Property.NodeScope);
     private final Random random;
     private final Version minVersion;
     private final Version maxVersion;
diff --git a/test/logger-usage/build.gradle b/test/logger-usage/build.gradle
new file mode 100644
index 00000000000..1a5815cf76e
--- /dev/null
+++ b/test/logger-usage/build.gradle
@@ -0,0 +1,33 @@
+import org.elasticsearch.gradle.precommit.PrecommitTasks
+
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+dependencies {
+  compile 'org.ow2.asm:asm-debug-all:5.0.4' // use asm-debug-all as asm-all is broken
+  testCompile "org.elasticsearch.test:framework:${version}"
+}
+
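+// this project provides the checker itself, so the check is disabled here (presumably to
+// avoid a bootstrap cycle: the checker would have to be built before it could run on itself)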
+loggerUsageCheck.enabled = false
+
+forbiddenApisMain.enabled = true // disabled by parent project
+forbiddenApisMain {
+  signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] // does not depend on core, only jdk signatures
+}
+jarHell.enabled = true // disabled by parent project
\ No newline at end of file
diff --git a/test/logger-usage/src/main/java/org/elasticsearch/test/loggerusage/ESLoggerUsageChecker.java b/test/logger-usage/src/main/java/org/elasticsearch/test/loggerusage/ESLoggerUsageChecker.java
new file mode 100644
index 00000000000..57ec37cb695
--- /dev/null
+++ b/test/logger-usage/src/main/java/org/elasticsearch/test/loggerusage/ESLoggerUsageChecker.java
@@ -0,0 +1,460 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.loggerusage;
+
+import org.objectweb.asm.AnnotationVisitor;
+import org.objectweb.asm.ClassReader;
+import org.objectweb.asm.ClassVisitor;
+import org.objectweb.asm.MethodVisitor;
+import org.objectweb.asm.Opcodes;
+import org.objectweb.asm.Type;
+import org.objectweb.asm.tree.AbstractInsnNode;
+import org.objectweb.asm.tree.IntInsnNode;
+import org.objectweb.asm.tree.LdcInsnNode;
+import org.objectweb.asm.tree.LineNumberNode;
+import org.objectweb.asm.tree.MethodInsnNode;
+import org.objectweb.asm.tree.MethodNode;
+import org.objectweb.asm.tree.TypeInsnNode;
+import org.objectweb.asm.tree.analysis.Analyzer;
+import org.objectweb.asm.tree.analysis.AnalyzerException;
+import org.objectweb.asm.tree.analysis.BasicInterpreter;
+import org.objectweb.asm.tree.analysis.BasicValue;
+import org.objectweb.asm.tree.analysis.Frame;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.file.FileVisitResult;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.SimpleFileVisitor;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.util.Arrays;
+import java.util.List;
+import java.util.function.Consumer;
+import java.util.function.Predicate;
+
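+/**
+ * Scans compiled class files for incorrect usages of the ESLogger methods that take a
+ * format string and varargs, statically verifying that the number of {} placeholders
+ * in the message matches the number of arguments supplied at each call site.
+ */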
+public class ESLoggerUsageChecker {
+    public static final String LOGGER_CLASS = "org.elasticsearch.common.logging.ESLogger";
+    public static final String THROWABLE_CLASS = "java.lang.Throwable";
+    public static final List<String> LOGGER_METHODS = Arrays.asList("trace", "debug", "info", "warn", "error");
+    public static final String IGNORE_CHECKS_ANNOTATION = "org.elasticsearch.common.SuppressLoggerChecks";
+
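+    // command-line entry point: each argument names a directory of class files to scan;
+    // throws (and thus exits non-zero) if any wrong logger usage is reported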
+    public static void main(String... args) throws Exception {
+        System.out.println("checking for wrong usages of ESLogger...");
+        boolean[] wrongUsageFound = new boolean[1];
+        checkLoggerUsage(wrongLoggerUsage -> {
+            System.err.println(wrongLoggerUsage.getErrorLines());
+            wrongUsageFound[0] = true;
+        }, args);
+        if (wrongUsageFound[0]) {
+            throw new Exception("Wrong logger usages found");
+        } else {
+            System.out.println("No wrong usages found");
+        }
+    }
+
+    private static void checkLoggerUsage(Consumer<WrongLoggerUsage> wrongUsageCallback, String... classDirectories)
+        throws IOException {
+        for (String classDirectory : classDirectories) {
+            Path root = Paths.get(classDirectory);
+            if (Files.isDirectory(root) == false) {
+                throw new IllegalArgumentException(root + " should be an existing directory");
+            }
+            Files.walkFileTree(root, new SimpleFileVisitor<Path>() {
+                @Override
+                public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
+                    // Path#endsWith compares whole path elements, so compare the file name as a string
+                    if (Files.isRegularFile(file) && file.getFileName().toString().endsWith(".class")) {
+                        try (InputStream in = Files.newInputStream(file)) {
+                            ESLoggerUsageChecker.check(wrongUsageCallback, in);
+                        }
+                    }
+                    return super.visitFile(file, attrs);
+                }
+            });
+        }
+    }
+
+    public static void check(Consumer<WrongLoggerUsage> wrongUsageCallback, InputStream inputStream) throws IOException {
+        check(wrongUsageCallback, inputStream, s -> true);
+    }
+
+    // used by tests
+    static void check(Consumer<WrongLoggerUsage> wrongUsageCallback, InputStream inputStream, Predicate<String> methodsToCheck)
+        throws IOException {
+        ClassReader cr = new ClassReader(inputStream);
+        cr.accept(new ClassChecker(wrongUsageCallback, methodsToCheck), 0);
+    }
+
+    public static class WrongLoggerUsage {
+        private final String className;
+        private final String methodName;
+        private final String logMethodName;
+        private final int line;
+        private final String errorMessage;
+
+        public WrongLoggerUsage(String className, String methodName, String logMethodName, int line, String errorMessage) {
+            this.className = className;
+            this.methodName = methodName;
+            this.logMethodName = logMethodName;
+            this.line = line;
+            this.errorMessage = errorMessage;
+        }
+
+        @Override
+        public String toString() {
+            return "WrongLoggerUsage{" +
+                "className='" + className + '\'' +
+                ", methodName='" + methodName + '\'' +
+                ", logMethodName='" + logMethodName + '\'' +
+                ", line=" + line +
+                ", errorMessage='" + errorMessage + '\'' +
+                '}';
+        }
+
+        /**
+         * Returns an error message formatted like the stack trace lines emitted by
+         * {@link Throwable#printStackTrace}, so that consoles and IDEs can link it
+         * to the offending source location
+         */
+        public String getErrorLines() {
+            String fullClassName = Type.getObjectType(className).getClassName();
+            String simpleClassName = fullClassName.substring(fullClassName.lastIndexOf(".") + 1, fullClassName.length());
+            int innerClassIndex = simpleClassName.indexOf("$");
+            if (innerClassIndex > 0) {
+                simpleClassName = simpleClassName.substring(0, innerClassIndex);
+            }
+            simpleClassName = simpleClassName + ".java";
+            StringBuilder sb = new StringBuilder();
+            sb.append("Bad usage of ");
+            sb.append(LOGGER_CLASS).append("#").append(logMethodName);
+            sb.append(": ");
+            sb.append(errorMessage);
+            sb.append("\n\tat ");
+            sb.append(fullClassName);
+            sb.append(".");
+            sb.append(methodName);
+            sb.append("(");
+            sb.append(simpleClassName);
+            sb.append(":");
+            sb.append(line);
+            sb.append(")");
+            return sb.toString();
+        }
+    }
+
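+    // visits a class, skipping it entirely when annotated with @SuppressLoggerChecks,
+    // and routes each method accepted by methodsToCheck to a MethodChecker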
+    private static class ClassChecker extends ClassVisitor {
+        private String className;
+        private boolean ignoreChecks;
+        private final Consumer<WrongLoggerUsage> wrongUsageCallback;
+        private final Predicate<String> methodsToCheck;
+
+        public ClassChecker(Consumer<WrongLoggerUsage> wrongUsageCallback, Predicate<String> methodsToCheck) {
+            super(Opcodes.ASM5);
+            this.wrongUsageCallback = wrongUsageCallback;
+            this.methodsToCheck = methodsToCheck;
+        }
+
+        @Override
+        public void visit(int version, int access, String name, String signature, String superName, String[] interfaces) {
+            this.className = name;
+        }
+
+        @Override
+        public AnnotationVisitor visitAnnotation(String desc, boolean visible) {
+            if (IGNORE_CHECKS_ANNOTATION.equals(Type.getType(desc).getClassName())) {
+                ignoreChecks = true;
+            }
+            return super.visitAnnotation(desc, visible);
+        }
+
+        @Override
+        public MethodVisitor visitMethod(int access, String name, String desc, String signature, String[] exceptions) {
+            if (ignoreChecks == false && methodsToCheck.test(name)) {
+                return new MethodChecker(this.className, access, name, desc, wrongUsageCallback);
+            } else {
+                return super.visitMethod(access, name, desc, signature, exceptions);
+            }
+        }
+    }
+
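+    // buffers the method body into a MethodNode and, unless the method itself is annotated
+    // with @SuppressLoggerChecks, analyzes it once visiting completes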
+    private static class MethodChecker extends MethodVisitor {
+        private final String className;
+        private final Consumer<WrongLoggerUsage> wrongUsageCallback;
+        private boolean ignoreChecks;
+
+        public MethodChecker(String className, int access, String name, String desc, Consumer<WrongLoggerUsage> wrongUsageCallback) {
+            super(Opcodes.ASM5, new MethodNode(access, name, desc, null, null));
+            this.className = className;
+            this.wrongUsageCallback = wrongUsageCallback;
+        }
+
+        @Override
+        public AnnotationVisitor visitAnnotation(String desc, boolean visible) {
+            if (IGNORE_CHECKS_ANNOTATION.equals(Type.getType(desc).getClassName())) {
+                ignoreChecks = true;
+            }
+            return super.visitAnnotation(desc, visible);
+        }
+
+        @Override
+        public void visitEnd() {
+            if (ignoreChecks == false) {
+                findBadLoggerUsages((MethodNode) mv);
+            }
+            super.visitEnd();
+        }
+
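+        /**
+         * Runs two dataflow analyses over the method: one tracks the placeholder count of
+         * string constants, the other tracks the size of freshly created object arrays (the
+         * compiled form of varargs). At each ESLogger call site the two results are compared
+         * and any mismatch or indeterminate value is reported through the callback.
+         */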
+        public void findBadLoggerUsages(MethodNode methodNode) {
+            Analyzer<BasicValue> stringPlaceHolderAnalyzer = new Analyzer<>(new PlaceHolderStringInterpreter());
+            Analyzer<BasicValue> arraySizeAnalyzer = new Analyzer<>(new ArraySizeInterpreter());
+            try {
+                stringPlaceHolderAnalyzer.analyze(className, methodNode);
+                arraySizeAnalyzer.analyze(className, methodNode);
+            } catch (AnalyzerException e) {
+                throw new RuntimeException("Internal error: failed in analysis step", e);
+            }
+            Frame<BasicValue>[] stringFrames = stringPlaceHolderAnalyzer.getFrames();
+            Frame<BasicValue>[] arraySizeFrames = arraySizeAnalyzer.getFrames();
+            AbstractInsnNode[] insns = methodNode.instructions.toArray();
+            int lineNumber = -1;
+            for (int i = 0; i < insns.length; i++) {
+                AbstractInsnNode insn = insns[i];
+                if (insn instanceof LineNumberNode) {
+                    LineNumberNode lineNumberNode = (LineNumberNode) insn;
+                    lineNumber = lineNumberNode.line;
+                }
+                if (insn.getOpcode() == Opcodes.INVOKEVIRTUAL) {
+                    MethodInsnNode methodInsn = (MethodInsnNode) insn;
+                    if (Type.getObjectType(methodInsn.owner).getClassName().equals(LOGGER_CLASS) == false) {
+                        continue;
+                    }
+                    if (LOGGER_METHODS.contains(methodInsn.name) == false) {
+                        continue;
+                    }
+                    Type[] argumentTypes = Type.getArgumentTypes(methodInsn.desc);
+                    BasicValue logMessageLengthObject = getStackValue(stringFrames[i], argumentTypes.length - 1); // first argument
+                    if (logMessageLengthObject instanceof PlaceHolderStringBasicValue == false) {
+                        wrongUsageCallback.accept(new WrongLoggerUsage(className, methodNode.name, methodInsn.name, lineNumber,
+                            "First argument must be a string constant so that we can statically ensure proper place holder usage"));
+                        continue;
+                    }
+                    PlaceHolderStringBasicValue logMessageLength = (PlaceHolderStringBasicValue) logMessageLengthObject;
+                    if (logMessageLength.minValue != logMessageLength.maxValue) {
+                        wrongUsageCallback.accept(new WrongLoggerUsage(className, methodNode.name, methodInsn.name, lineNumber,
+                            "Multiple log messages with conflicting number of place holders"));
+                        continue;
+                    }
+                    BasicValue varArgsSizeObject = getStackValue(arraySizeFrames[i], 0); // last argument
+                    if (varArgsSizeObject instanceof ArraySizeBasicValue == false) {
+                        wrongUsageCallback.accept(new WrongLoggerUsage(className, methodNode.name, methodInsn.name, lineNumber,
+                            "Could not determine size of varargs array"));
+                        continue;
+                    }
+                    ArraySizeBasicValue varArgsSize = (ArraySizeBasicValue) varArgsSizeObject;
+                    if (varArgsSize.minValue != varArgsSize.maxValue) {
+                        wrongUsageCallback.accept(new WrongLoggerUsage(className, methodNode.name, methodInsn.name, lineNumber,
+                            "Multiple parameter arrays with conflicting sizes"));
+                        continue;
+                    }
+                    assert logMessageLength.minValue == logMessageLength.maxValue && varArgsSize.minValue == varArgsSize.maxValue;
+                    if (logMessageLength.minValue != varArgsSize.minValue) {
+                        wrongUsageCallback.accept(new WrongLoggerUsage(className, methodNode.name, methodInsn.name, lineNumber,
+                            "Expected " + logMessageLength.minValue + " arguments but got " + varArgsSize.minValue));
+                        continue;
+                    }
+                }
+            }
+        }
+    }
+
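+    // counts occurrences of the "{}" placeholder pattern in a log message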
+    private static int calculateNumberOfPlaceHolders(String message) {
+        int count = 0;
+        for (int i = 1; i < message.length(); i++) {
+            if (message.charAt(i - 1) == '{' && message.charAt(i) == '}') {
+                count++;
+                i += 1;
+            }
+        }
+        return count;
+    }
+
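+    // returns the value index slots below the top of the operand stack (index 0 is the top),
+    // or null when the requested index is deeper than the stack itself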
+    private static BasicValue getStackValue(Frame<BasicValue> f, int index) {
+        int top = f.getStackSize() - 1;
+        return index <= top ? f.getStack(top - index) : null;
+    }
+
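+    // a BasicValue that additionally carries an integer interval [minValue, maxValue], so
+    // that values merged from different control-flow branches can be tracked conservatively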
+    private static class IntMinMaxTrackingBasicValue extends BasicValue {
+        protected final int minValue;
+        protected final int maxValue;
+
+        public IntMinMaxTrackingBasicValue(Type type, int value) {
+            super(type);
+            this.minValue = value;
+            this.maxValue = value;
+        }
+
+        public IntMinMaxTrackingBasicValue(Type type, int minValue, int maxValue) {
+            super(type);
+            this.minValue = minValue;
+            this.maxValue = maxValue;
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (this == o) return true;
+            if (o == null || getClass() != o.getClass()) return false;
+            if (!super.equals(o)) return false;
+
+            IntMinMaxTrackingBasicValue that = (IntMinMaxTrackingBasicValue) o;
+
+            if (minValue != that.minValue) return false;
+            return maxValue == that.maxValue;
+
+        }
+
+        @Override
+        public int hashCode() {
+            int result = super.hashCode();
+            result = 31 * result + minValue;
+            result = 31 * result + maxValue;
+            return result;
+        }
+
+        @Override
+        public String toString() {
+            return "IntMinMaxTrackingBasicValue{" +
+                "minValue=" + minValue +
+                ", maxValue=" + maxValue +
+                '}';
+        }
+    }
+
+    private static final class PlaceHolderStringBasicValue extends IntMinMaxTrackingBasicValue {
+        public static final Type STRING_OBJECT_TYPE = Type.getObjectType("java/lang/String");
+
+        public PlaceHolderStringBasicValue(int placeHolders) {
+            super(STRING_OBJECT_TYPE, placeHolders);
+        }
+
+        public PlaceHolderStringBasicValue(int minPlaceHolders, int maxPlaceHolders) {
+            super(STRING_OBJECT_TYPE, minPlaceHolders, maxPlaceHolders);
+        }
+    }
+
+    private static final class ArraySizeBasicValue extends IntMinMaxTrackingBasicValue {
+        public ArraySizeBasicValue(Type type, int minArraySize, int maxArraySize) {
+            super(type, minArraySize, maxArraySize);
+        }
+    }
+
+    private static final class IntegerConstantBasicValue extends IntMinMaxTrackingBasicValue {
+        public IntegerConstantBasicValue(Type type, int constant) {
+            super(type, constant);
+        }
+
+        public IntegerConstantBasicValue(Type type, int minConstant, int maxConstant) {
+            super(type, minConstant, maxConstant);
+        }
+    }
+
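+    // tags each loaded string constant with its placeholder count; when branches join with
+    // different counts, the interval is widened to cover both (see merge below)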
+    private static final class PlaceHolderStringInterpreter extends BasicInterpreter {
+        @Override
+        public BasicValue newOperation(AbstractInsnNode insnNode) throws AnalyzerException {
+            if (insnNode.getOpcode() == Opcodes.LDC) {
+                Object constant = ((LdcInsnNode) insnNode).cst;
+                if (constant instanceof String) {
+                    return new PlaceHolderStringBasicValue(calculateNumberOfPlaceHolders((String) constant));
+                }
+            }
+            return super.newOperation(insnNode);
+        }
+
+        @Override
+        public BasicValue merge(BasicValue value1, BasicValue value2) {
+            if (value1 instanceof PlaceHolderStringBasicValue && value2 instanceof PlaceHolderStringBasicValue
+                && value1.equals(value2) == false) {
+                PlaceHolderStringBasicValue c1 = (PlaceHolderStringBasicValue) value1;
+                PlaceHolderStringBasicValue c2 = (PlaceHolderStringBasicValue) value2;
+                return new PlaceHolderStringBasicValue(Math.min(c1.minValue, c2.minValue), Math.max(c1.maxValue, c2.maxValue));
+            }
+            return super.merge(value1, value2);
+        }
+    }
+
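+    // tracks integer constants (ICONST_*, BIPUSH, SIPUSH, integer LDC) so that the size of an
+    // array created by ANEWARRAY is known; AASTORE preserves the tracked size, since storing
+    // an element only fills an existing slot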
+    private static final class ArraySizeInterpreter extends BasicInterpreter {
+        @Override
+        public BasicValue newOperation(AbstractInsnNode insnNode) throws AnalyzerException {
+            switch (insnNode.getOpcode()) {
+                case ICONST_0: return new IntegerConstantBasicValue(Type.INT_TYPE, 0);
+                case ICONST_1: return new IntegerConstantBasicValue(Type.INT_TYPE, 1);
+                case ICONST_2: return new IntegerConstantBasicValue(Type.INT_TYPE, 2);
+                case ICONST_3: return new IntegerConstantBasicValue(Type.INT_TYPE, 3);
+                case ICONST_4: return new IntegerConstantBasicValue(Type.INT_TYPE, 4);
+                case ICONST_5: return new IntegerConstantBasicValue(Type.INT_TYPE, 5);
+                case BIPUSH:
+                case SIPUSH: return new IntegerConstantBasicValue(Type.INT_TYPE, ((IntInsnNode)insnNode).operand);
+                case Opcodes.LDC: {
+                    Object constant = ((LdcInsnNode)insnNode).cst;
+                    if (constant instanceof Integer) {
+                        return new IntegerConstantBasicValue(Type.INT_TYPE, (Integer)constant);
+                    } else {
+                        return super.newOperation(insnNode);
+                    }
+                }
+                default: return super.newOperation(insnNode);
+            }
+        }
+
+        @Override
+        public BasicValue merge(BasicValue value1, BasicValue value2) {
+            if (value1 instanceof IntegerConstantBasicValue && value2 instanceof IntegerConstantBasicValue) {
+                IntegerConstantBasicValue c1 = (IntegerConstantBasicValue) value1;
+                IntegerConstantBasicValue c2 = (IntegerConstantBasicValue) value2;
+                return new IntegerConstantBasicValue(Type.INT_TYPE, Math.min(c1.minValue, c2.minValue), Math.max(c1.maxValue, c2.maxValue));
+            } else if (value1 instanceof ArraySizeBasicValue && value2 instanceof ArraySizeBasicValue) {
+                ArraySizeBasicValue c1 = (ArraySizeBasicValue) value1;
+                ArraySizeBasicValue c2 = (ArraySizeBasicValue) value2;
+                return new ArraySizeBasicValue(Type.INT_TYPE, Math.min(c1.minValue, c2.minValue), Math.max(c1.maxValue, c2.maxValue));
+            }
+            return super.merge(value1, value2);
+        }
+
+        @Override
+        public BasicValue unaryOperation(AbstractInsnNode insnNode, BasicValue value) throws AnalyzerException {
+            if (insnNode.getOpcode() == Opcodes.ANEWARRAY && value instanceof IntegerConstantBasicValue) {
+                IntegerConstantBasicValue constantBasicValue = (IntegerConstantBasicValue) value;
+                String desc = ((TypeInsnNode) insnNode).desc;
+                return new ArraySizeBasicValue(Type.getType("[" + Type.getObjectType(desc)), constantBasicValue.minValue,
+                    constantBasicValue.maxValue);
+            }
+            return super.unaryOperation(insnNode, value);
+        }
+
+        @Override
+        public BasicValue ternaryOperation(AbstractInsnNode insnNode, BasicValue value1, BasicValue value2, BasicValue value3)
+            throws AnalyzerException {
+            if (insnNode.getOpcode() == Opcodes.AASTORE && value1 instanceof ArraySizeBasicValue) {
+                return value1;
+            }
+            return super.ternaryOperation(insnNode, value1, value2, value3);
+        }
+    }
+}
diff --git a/test/logger-usage/src/test/java/org/elasticsearch/test/loggerusage/ESLoggerUsageTests.java b/test/logger-usage/src/test/java/org/elasticsearch/test/loggerusage/ESLoggerUsageTests.java
new file mode 100644
index 00000000000..ab07ecbf45e
--- /dev/null
+++ b/test/logger-usage/src/test/java/org/elasticsearch/test/loggerusage/ESLoggerUsageTests.java
@@ -0,0 +1,165 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.loggerusage;
+
+import org.elasticsearch.common.SuppressLoggerChecks;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.test.loggerusage.ESLoggerUsageChecker.WrongLoggerUsage;
+import org.elasticsearch.test.ESTestCase;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.Predicate;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasItem;
+import static org.hamcrest.Matchers.notNullValue;
+
+public class ESLoggerUsageTests extends ESTestCase {
+
+    public void testLoggerUsageChecks() throws IOException {
+        for (Method method : getClass().getMethods()) {
+            if (method.getDeclaringClass().equals(getClass())) {
+                if (method.getName().startsWith("check")) {
+                    logger.info("Checking logger usage for method {}", method.getName());
+                    InputStream classInputStream = getClass().getResourceAsStream(getClass().getSimpleName() + ".class");
+                    List<WrongLoggerUsage> errors = new ArrayList<>();
+                    ESLoggerUsageChecker.check(errors::add, classInputStream, Predicate.isEqual(method.getName()));
+                    if (method.getName().startsWith("checkFail")) {
+                        assertFalse("Expected " + method.getName() + " to have wrong ESLogger usage", errors.isEmpty());
+                    } else {
+                        assertTrue("Method " + method.getName() + " has unexpected ESLogger usage errors: " + errors, errors.isEmpty());
+                    }
+                } else {
+                    assertTrue("only allow methods starting with test or check in this class", method.getName().startsWith("test"));
+                }
+            }
+        }
+    }
+
+    public void testLoggerUsageCheckerCompatibilityWithESLogger() throws NoSuchMethodException {
+        assertThat(ESLoggerUsageChecker.LOGGER_CLASS, equalTo(ESLogger.class.getName()));
+        assertThat(ESLoggerUsageChecker.THROWABLE_CLASS, equalTo(Throwable.class.getName()));
+        int varargsMethodCount = 0;
+        for (Method method : ESLogger.class.getMethods()) {
+            if (method.isVarArgs()) {
+                // check that logger usage checks all varargs methods
+                assertThat(ESLoggerUsageChecker.LOGGER_METHODS, hasItem(method.getName()));
+                varargsMethodCount++;
+            }
+        }
+        // currently we have two overloaded methods for each of debug, info, ...
+        // if that changes, we might want to have another look at the usage checker
+        assertThat(varargsMethodCount, equalTo(ESLoggerUsageChecker.LOGGER_METHODS.size() * 2));
+
+        // check that signature is same as we expect in the usage checker
+        for (String methodName : ESLoggerUsageChecker.LOGGER_METHODS) {
+            assertThat(ESLogger.class.getMethod(methodName, String.class, Object[].class), notNullValue());
+            assertThat(ESLogger.class.getMethod(methodName, String.class, Throwable.class, Object[].class), notNullValue());
+        }
+    }
+
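+    // the methods below are not executed as tests; testLoggerUsageChecks scans this class's
+    // bytecode and asserts that the checker flags exactly the checkFail* methods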
+    public void checkNumberOfArguments1() {
+        logger.info("Hello {}", "world");
+    }
+
+    public void checkFailNumberOfArguments1() {
+        logger.info("Hello {}");
+    }
+
+    @SuppressLoggerChecks(reason = "test ignore functionality")
+    public void checkIgnoreWhenAnnotationPresent() {
+        logger.info("Hello {}");
+    }
+
+    public void checkNumberOfArguments2() {
+        logger.info("Hello {}, {}, {}", "world", 2, "third argument");
+    }
+
+    public void checkFailNumberOfArguments2() {
+        logger.info("Hello {}, {}", "world", 2, "third argument");
+    }
+
+    public void checkNumberOfArguments3() {
+        // long argument list (> 5), emits different bytecode
+        logger.info("Hello {}, {}, {}, {}, {}, {}, {}", "world", 2, "third argument", 4, 5, 6, new String("last arg"));
+    }
+
+    public void checkFailNumberOfArguments3() {
+        logger.info("Hello {}, {}, {}, {}, {}, {}, {}", "world", 2, "third argument", 4, 5, 6, 7, new String("last arg"));
+    }
+
+    public void checkOrderOfExceptionArgument() {
+        logger.info("Hello", new Exception());
+    }
+
+    public void checkOrderOfExceptionArgument1() {
+        logger.info("Hello {}", new Exception(), "world");
+    }
+
+    public void checkFailOrderOfExceptionArgument1() {
+        logger.info("Hello {}", "world", new Exception());
+    }
+
+    public void checkOrderOfExceptionArgument2() {
+        logger.info("Hello {}, {}", new Exception(), "world", 42);
+    }
+
+    public void checkFailOrderOfExceptionArgument2() {
+        logger.info("Hello {}, {}", "world", 42, new Exception());
+    }
+
+    public void checkFailNonConstantMessage(boolean b) {
+        logger.info(Boolean.toString(b));
+    }
+
+    public void checkComplexUsage(boolean b) {
+        String message = "Hello {}, {}";
+        Object[] args = new Object[] { "world", 42 };
+        if (b) {
+            message = "also two args {}{}";
+            args = new Object[] { "world", 43 };
+        }
+        logger.info(message, args);
+    }
+
+    public void checkFailComplexUsage1(boolean b) {
+        String message = "Hello {}, {}";
+        Object[] args = new Object[] { "world", 42 };
+        if (b) {
+            message = "just one arg {}";
+            args = new Object[] { "world", 43 };
+        }
+        logger.info(message, args);
+    }
+
+    public void checkFailComplexUsage2(boolean b) {
+        String message = "Hello {}, {}";
+        Object[] args = new Object[] { "world", 42 };
+        if (b) {
+            message = "also two args {}{}";
+            args = new Object[] { "world", 43, "another argument" };
+        }
+        logger.info(message, args);
+    }
+}